Diffstat (limited to 'scripts/crawler/tbc_web_crawler')
-rw-r--r--  scripts/crawler/tbc_web_crawler/__init__.py           |  0
-rw-r--r--  scripts/crawler/tbc_web_crawler/settings.py           | 86
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/__init__.py   |  4
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/items.py      | 18
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py | 76
5 files changed, 184 insertions, 0 deletions
diff --git a/scripts/crawler/tbc_web_crawler/__init__.py b/scripts/crawler/tbc_web_crawler/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/__init__.py
diff --git a/scripts/crawler/tbc_web_crawler/settings.py b/scripts/crawler/tbc_web_crawler/settings.py
new file mode 100644
index 0000000..03ba836
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/settings.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for tbc_web_crawler project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     http://doc.scrapy.org/en/latest/topics/settings.html
+#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'tbc_web_crawler'
+
+SPIDER_MODULES = ['tbc_web_crawler.spiders']
+NEWSPIDER_MODULE = 'tbc_web_crawler.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'tbc_web_crawler (+http://www.yourdomain.com)'
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+CONCURRENT_REQUESTS=100
+
+# Configure a delay for requests for the same website (default: 0)
+# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY=3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN=16
+#CONCURRENT_REQUESTS_PER_IP=16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED=False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED=False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'tbc_web_crawler.middlewares.MyCustomSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+    #'scrapy.downloadermiddlewares.retry.RetryMiddleware': None
+#    'tbc_web_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'tbc_web_crawler.pipelines.SomePipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
+# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
+#AUTOTHROTTLE_ENABLED=True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY=5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY=60
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG=False
+
+# Enable and configure HTTP caching (disabled by default)
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED=True
+#HTTPCACHE_EXPIRATION_SECS=0
+#HTTPCACHE_DIR='httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES=[]
+#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/scripts/crawler/tbc_web_crawler/spiders/__init__.py b/scripts/crawler/tbc_web_crawler/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/scripts/crawler/tbc_web_crawler/spiders/items.py b/scripts/crawler/tbc_web_crawler/spiders/items.py
new file mode 100644
index 0000000..9dda20f
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/items.py
@@ -0,0 +1,18 @@
+import scrapy
+
+
+class TbcErrorItems(scrapy.Item):
+
+
+    chapter_name = scrapy.Field()
+    chapter_urls = scrapy.Field()
+    completed_book_urls = scrapy.Field()
+    number_of_errors = scrapy.Field()
+    error_messages = scrapy.Field()
+
+
+
+class TbcBrokenItems(scrapy.Item):
+
+    broken_url = scrapy.Field()
+    broken_status = scrapy.Field()
diff --git a/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py b/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py
new file mode 100644
index 0000000..9688e70
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py
@@ -0,0 +1,76 @@
+import scrapy
+from items import TbcErrorItems, TbcBrokenItems
+from scrapy.utils.response import get_base_url
+from scrapy.utils.url import urljoin_rfc
+from scrapy.http import Request
+
+import os, json
+
+if os.path.isfile('items.json'):
+    os.remove('items.json')
+else:
+    pass
+
+class TbcSpider(scrapy.Spider):
+
+    name = "tbc_spider"  # Name of the crawler. Use this name when crawling from the terminal, for eg - scrapy crawl tbc_spider
+
+    start_urls = ["http://tbc-python.fossee.aero.iitb.ac.in/completed-books/"]
+
+    handle_httpstatus_list = [404, 500, 502]  # A list containing HTTP error codes.
+
+    def parse(self, response):
+        """ This function looks for book links and returns the url"""
+
+        for book_link in response.xpath('//a[contains(@href,"book-details")]/@href').extract():
+            """ Searches for links with "book-details" in it """
+
+            first_base_url = get_base_url(response)
+            first_relative_url = urljoin_rfc(first_base_url, book_link)
+            """creates a url to be returned to the next function."""
+
+            yield scrapy.Request(first_relative_url, callback=self.parse_book_contents)
+
+
+
+    def parse_book_contents(self, response):
+
+        """ This function looks for chapter links through each book link and returns the url"""
+
+        for chapter_link in response.xpath('//a[contains(@href,"convert-notebook")]/@href').extract():
+            """ Searches for chapters in each book list"""
+            second_base_url = get_base_url(response).split('/book-details')[0]
+            second_relative_url = urljoin_rfc(second_base_url, chapter_link)
+            """creates a url to be returned to the next function."""
+
+            yield scrapy.Request(second_relative_url, callback=self.parse_chapter_details)
+
+
+
+    def parse_chapter_details(self, response):
+
+        if not response.xpath('//h1/text()').extract():
+            chapter_details = [response.url]
+        else:
+            chapter_details = response.xpath('//h1/text()').extract()
+
+
+        error_tag = response.xpath('//div[@class="output_subarea output_text output_error"]')
+        error_list = [error_notifications for error_notifications \
+                      in response.xpath \
+                      ('//div[@class="output_subarea output_text output_error"]/span/text()').extract()]
+
+        if response.status in self.handle_httpstatus_list:
+            broken_items = TbcBrokenItems()
+            broken_items['broken_url'] = response.url
+            broken_items['broken_status'] = response.status
+            yield broken_items
+        else:
+            if len(error_tag) != 0:
+                items = TbcErrorItems()
+                items['chapter_name'] = chapter_details[0]
+                items['chapter_urls'] = response.url
+                items['number_of_errors'] = len(error_tag)
+                #items['completed_book_urls'] = response.request.headers.get('Referer', None)
+                #items['error_messages'] = error_list
+                yield items
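The spider removes any stale items.json at import time, which suggests it is meant to be run with Scrapy's JSON feed export enabled. A likely invocation, assuming the project's scrapy.cfg sits in scripts/crawler (that file is not part of this change), would be:

    cd scripts/crawler
    scrapy crawl tbc_spider -o items.json

The crawl command itself is the one named in the spider's own comment; the -o items.json flag and the working directory are assumptions rather than anything recorded in this diff.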
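For downstream processing, a minimal sketch of reading the exported items follows. It assumes the default JSON feed exporter (which writes a single JSON array) and uses only the field names defined in items.py; the script is illustrative and not part of the commit.

    import json

    # Assumption: items.json was produced by `scrapy crawl tbc_spider -o items.json`.
    with open('items.json') as f:
        items = json.load(f)  # the default JSON exporter writes one JSON array

    for item in items:
        if 'broken_url' in item:
            # TbcBrokenItems entry: a chapter URL that came back as 404/500/502
            print(item['broken_url'], item['broken_status'])
        else:
            # TbcErrorItems entry: a chapter page whose notebook output contains errors
            print(item['chapter_name'], item['chapter_urls'], item['number_of_errors'])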