# This snippet can be used to run Scrapy spiders from a script, independently of
# scrapyd or the scrapy command-line tool.
# The multiprocessing library is used to work around a limitation in Twisted:
# the reactor (and, in this case, the Scrapy instance running on top of it)
# cannot be restarted once it has been stopped, so each crawl is run in a
# separate process.
# [Here](http://groups.google.com/group/scrapy-users/browse_thread/thread/f332fc5b749d401a) is the mailing-list discussion for this snippet.
# #!/usr/bin/python
# import os
# # Must be at the top before other imports
# os.environ.setdefault('SCRAPY_SETTINGS_MODULE', 'umichemploymentscrape.settings')
# from scrapy import log, signals, project
# from scrapy.xlib.pydispatch import dispatcher
# from scrapy.conf import settings
# from scrapy.crawler import CrawlerProcess
# from multiprocessing import Process, Queue
# class CrawlerScript():
#     def __init__(self):
#         self.crawler = CrawlerProcess(settings)
#         if not hasattr(project, 'crawler'):
#             self.crawler.install()
#         self.crawler.configure()
#         self.items = []
#         dispatcher.connect(self._item_passed, signals.item_passed)
#
#     def _item_passed(self, item):
#         self.items.append(item)
#
#     def _crawl(self, queue, spider_name):
#         spider = self.crawler.spiders.create(spider_name)
#         if spider:
#             self.crawler.queue.append_spider(spider)
#         self.crawler.start()
#         self.crawler.stop()
#         queue.put(self.items)
#
#     def crawl(self, spider):
#         queue = Queue()
#         p = Process(target=self._crawl, args=(queue, spider,))
#         p.start()
#         p.join()
#         return queue.get(True)
#
# # Usage
# if __name__ == "__main__":
#     log.start()
#     # This example runs spider1 and then spider2 three times.
#     items = list()
#     crawler = CrawlerScript()
#     items.append(crawler.crawl('spider1'))
#     for i in range(3):
#         items.append(crawler.crawl('spider2'))
#     print items
# Snippet imported from snippets.scrapy.org (which no longer works)
# author: joehillen
# date: Oct 24, 2010
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from umichemploymentscrape.spiders.job_listing_spider import JobListingSpider
from scrapy.utils.project import get_project_settings
# if __name__ == "__main__":
spider = JobListingSpider()
settings = get_project_settings()
crawler = Crawler(settings)
crawler.signals.connect(reactor.stop, signal=signals.spider_closed)
crawler.configure()
items = list()
# Note: Crawler.crawl() only schedules the spider; it does not return the
# scraped items, so this list will not hold the crawl results.
items.append(crawler.crawl(spider))
crawler.start()
log.start()
reactor.run()  # the script will block here until the spider_closed signal is sent
print items
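# A possible refinement (a sketch, not part of the original script): since
# Crawler.crawl() does not return the scraped items, they can instead be
# collected through the item_scraped signal before the crawl is started.
# The list scraped_items and the handler collect_item below are illustrative
# names, not existing Scrapy APIs.
#
# scraped_items = []
#
# def collect_item(item, response, spider):
#     # Called once per item the spider yields.
#     scraped_items.append(item)
#
# crawler.signals.connect(collect_item, signal=signals.item_scraped)
# crawler.crawl(spider)
# crawler.start()
# reactor.run()
# print scraped_items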