Diffstat (limited to 'scripts')
-rw-r--r--  scripts/crawler/scrapy.cfg                            |  11
-rw-r--r--  scripts/crawler/tbc_web_crawler/__init__.py           |   0
-rw-r--r--  scripts/crawler/tbc_web_crawler/settings.py           |  86
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/__init__.py   |   4
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/items.py      |  18
-rw-r--r--  scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py |  76
-rw-r--r--  scripts/cron.sh                                       |  23
-rw-r--r--  scripts/database_updater.py                           |  78
-rw-r--r--  scripts/split_json.py                                 |  20
9 files changed, 316 insertions, 0 deletions
diff --git a/scripts/crawler/scrapy.cfg b/scripts/crawler/scrapy.cfg
new file mode 100644
index 0000000..b99853f
--- /dev/null
+++ b/scripts/crawler/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.org/en/latest/deploy.html
+
+[settings]
+default = tbc_web_crawler.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = tbc_web_crawler
diff --git a/scripts/crawler/tbc_web_crawler/__init__.py b/scripts/crawler/tbc_web_crawler/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/__init__.py
diff --git a/scripts/crawler/tbc_web_crawler/settings.py b/scripts/crawler/tbc_web_crawler/settings.py
new file mode 100644
index 0000000..03ba836
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/settings.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for tbc_web_crawler project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings by consulting the documentation:
+#
+# http://doc.scrapy.org/en/latest/topics/settings.html
+# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'tbc_web_crawler'
+
+SPIDER_MODULES = ['tbc_web_crawler.spiders']
+NEWSPIDER_MODULE = 'tbc_web_crawler.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'tbc_web_crawler (+http://www.yourdomain.com)'
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+CONCURRENT_REQUESTS=100
+
+# Configure a delay for requests for the same website (default: 0)
+# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY=3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN=16
+#CONCURRENT_REQUESTS_PER_IP=16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED=False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED=False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+# 'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+# 'tbc_web_crawler.middlewares.MyCustomSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+ #'scrapy.downloadermiddlewares.retry.RetryMiddleware': None
+# 'tbc_web_crawler.middlewares.MyCustomDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+# 'scrapy.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+# 'tbc_web_crawler.pipelines.SomePipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
+# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
+#AUTOTHROTTLE_ENABLED=True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY=5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY=60
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG=False
+
+# Enable and configure HTTP caching (disabled by default)
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED=True
+#HTTPCACHE_EXPIRATION_SECS=0
+#HTTPCACHE_DIR='httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES=[]
+#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/scripts/crawler/tbc_web_crawler/spiders/__init__.py b/scripts/crawler/tbc_web_crawler/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/scripts/crawler/tbc_web_crawler/spiders/items.py b/scripts/crawler/tbc_web_crawler/spiders/items.py
new file mode 100644
index 0000000..9dda20f
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/items.py
@@ -0,0 +1,18 @@
+import scrapy
+
+
+class TbcErrorItems(scrapy.Item):
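+    """ Holds details of a chapter page whose notebook output contains errors. """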
+
+    chapter_name = scrapy.Field()
+    chapter_urls = scrapy.Field()
+    completed_book_urls = scrapy.Field()
+    number_of_errors = scrapy.Field()
+    error_messages = scrapy.Field()
+
+
+class TbcBrokenItems(scrapy.Item):
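+    """ Holds details of a chapter link that returned an HTTP error status. """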
+
+    broken_url = scrapy.Field()
+    broken_status = scrapy.Field()
diff --git a/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py b/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py
new file mode 100644
index 0000000..9688e70
--- /dev/null
+++ b/scripts/crawler/tbc_web_crawler/spiders/tbc_spider.py
@@ -0,0 +1,76 @@
+import scrapy
+from items import TbcErrorItems, TbcBrokenItems
+from scrapy.utils.response import get_base_url
+from scrapy.utils.url import urljoin_rfc
+from scrapy.http import Request
+
+import os, json
+
+# Remove stale output from an earlier crawl; scrapy's -o option appends to an existing file.
+if os.path.isfile('items.json'):
+    os.remove('items.json')
+
+class TbcSpider(scrapy.Spider):
+
+    name = "tbc_spider"  # Name of the crawler; use it to crawl from the terminal, e.g. scrapy crawl tbc_spider
+
+    start_urls = ["http://tbc-python.fossee.aero.iitb.ac.in/completed-books/"]
+    handle_httpstatus_list = [404, 500, 502]  # HTTP error codes the spider handles itself.
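+    # Responses with these status codes still reach the callbacks instead of being filtered out, so broken chapter links can be recorded.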
+
+    def parse(self, response):
+        """ Looks for completed-book links on the start page and follows each one. """
+
+        for book_link in response.xpath('//a[contains(@href,"book-details")]/@href').extract():
+            # Only links whose href contains "book-details" are followed.
+            first_base_url = get_base_url(response)
+            # Join the relative link against the base URL of the listing page.
+            first_relative_url = urljoin_rfc(first_base_url, book_link)
+
+            # Each book page is handled by parse_book_contents.
+            yield scrapy.Request(first_relative_url, callback=self.parse_book_contents)
+
+
+
+    def parse_book_contents(self, response):
+        """ Looks for chapter links on each book page and follows each one. """
+
+        for chapter_link in response.xpath('//a[contains(@href,"convert-notebook")]/@href').extract():
+            # Chapter pages are served through the "convert-notebook" view.
+            # Strip the /book-details part so the chapter link is joined against the site root.
+            second_base_url = get_base_url(response).split('/book-details')[0]
+            second_relative_url = urljoin_rfc(second_base_url, chapter_link)
+
+            # Each chapter page is handled by parse_chapter_details.
+            yield scrapy.Request(second_relative_url, callback=self.parse_chapter_details)
+
+
+
+    def parse_chapter_details(self, response):
+        """ Records a broken chapter link, or collects the notebook error count for the chapter. """
+        # Use the page heading as the chapter name; fall back to the URL when there is no <h1>.
+        if not response.xpath('//h1/text()').extract():
+            chapter_details = [response.url]
+        else:
+            chapter_details = response.xpath('//h1/text()').extract()
+
+        # Notebook errors are rendered inside these output divs.
+        error_tag = response.xpath('//div[@class="output_subarea output_text output_error"]')
+        error_list = response.xpath(
+            '//div[@class="output_subarea output_text output_error"]/span/text()').extract()
+
+        if response.status in self.handle_httpstatus_list:
+            # The chapter link itself is broken; record its URL and status code.
+            broken_items = TbcBrokenItems()
+            broken_items['broken_url'] = response.url
+            broken_items['broken_status'] = response.status
+            yield broken_items
+        elif len(error_tag) != 0:
+            items = TbcErrorItems()
+            items['chapter_name'] = chapter_details[0]
+            items['chapter_urls'] = response.url
+            items['number_of_errors'] = len(error_tag)
+            #items['completed_book_urls'] = response.request.headers.get('Referer', None)
+            #items['error_messages'] = error_list
+            yield items
+
diff --git a/scripts/cron.sh b/scripts/cron.sh
new file mode 100644
index 0000000..bf219be
--- /dev/null
+++ b/scripts/cron.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
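+# Example crontab entry (the schedule and checkout path below are examples; adjust for the deployment):
+#   0 3 * * * bash /path/to/repo/scripts/cron.sh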
+
+DIR="$( cd "$( dirname "$0" )" && pwd )"
+cd "$DIR"
+
+python database_updater.py
+
+source ../../../bin/activate
+# Virtualenv path on the test server; it may differ on other machines. Ideally it would be "source ../../bin/activate".
+
+cd crawler/
+
+scrapy crawl tbc_spider -o items.json -t json
+# scrapy crawl only works from inside the project directory that contains scrapy.cfg.
+
+cd ..
+
+python split_json.py
+
+deactivate
+
diff --git a/scripts/database_updater.py b/scripts/database_updater.py
new file mode 100644
index 0000000..71813ea
--- /dev/null
+++ b/scripts/database_updater.py
@@ -0,0 +1,78 @@
+import os
+import sys
+
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "PythonTBC.settings")
+base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(base_path)
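+# base_path is the repository root; adding it to sys.path makes the Django apps (commentingapp, tbc) importable.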
+
+from commentingapp.models import Url, Comments
+from commentingapp.commenting_new import DisqusCommenting
+from tbc.models import Book, Chapters
+from django.contrib.auth.models import User
+
+class CronForCommenting(object):
+
+    def fetch_comments_from_script(self):
+        """ Fetches comments from the Disqus commenting script. """
+
+        commenting_instance = DisqusCommenting()
+        check_net = commenting_instance.check_internet_connection()
+        check_auth = commenting_instance.check_authentication("enter your disqus api public key here",
+                                                              "enter your forum name here"
+                                                              )
+        thread = commenting_instance.get_thread_ids()
+        self.comments_for_db = commenting_instance.get_comments()
+
+        return self.comments_for_db
+
+
+
+    def add_comments_to_db(self):
+        """ Populates the database with the fetched comments, or updates it if it already has entries. """
+        if not Url.objects.exists():
+            # The database is empty, so populate it from scratch.
+            for comment_details in self.comments_for_db:
+                url_instance = Url(url=comment_details["chapter_urls"])  # url_instance is a Url model object, not a string.
+                url_instance.save()
+                for comment in comment_details["comment_list"]:
+                    Comments.objects.create(url=url_instance, comments=comment)
+            return "Database is created"
+
+        else:
+            # The database already has entries, so only add what is missing.
+            for comment_details in self.comments_for_db:
+                url_object, url_status = Url.objects.get_or_create(url=comment_details["chapter_urls"])
+                url_primary_key = url_object.pk
+                for comment in comment_details["comment_list"]:
+                    Comments.objects.get_or_create(comments=comment, url_id=url_primary_key)
+            return "Database is updated."
+
+
+    def delete_redundant_comments(self):
+        """ Removes urls and comments that no longer exist in the fetched Disqus data. """
+        # Delete urls that no longer have any comments.
+        url_list = [urls["chapter_urls"] for urls in self.comments_for_db]
+        url_list_db = Url.objects.values_list("url", flat=True)
+        url_difference = set(url_list_db) - set(url_list)
+        for delete_url in url_difference:
+            Url.objects.filter(url=delete_url).delete()
+
+        # Delete comments that have been removed from the TBC notebooks.
+        for comment_details in self.comments_for_db:
+            url_instance = Url.objects.get(url=comment_details["chapter_urls"])
+            comment_list_db = url_instance.comments_set.values_list("comments", flat=True)
+            redundant_comment_list = set(comment_list_db) - set(comment_details["comment_list"])
+            for delete_comment in redundant_comment_list:
+                url_instance.comments_set.filter(comments=delete_comment).delete()
+        return "Redundant Comments deleted."
+
+
+if __name__ == '__main__':
+
+    a = CronForCommenting()
+    b = a.fetch_comments_from_script()
+    c = a.add_comments_to_db()  # This should always be before delete_redundant_comments
+    d = a.delete_redundant_comments()  # This should always be after add_comments_to_db
+    print c
+    print d
diff --git a/scripts/split_json.py b/scripts/split_json.py
new file mode 100644
index 0000000..baa0b90
--- /dev/null
+++ b/scripts/split_json.py
@@ -0,0 +1,20 @@
+import cPickle
+import json
+from os.path import dirname, abspath, join
+
+try:
+    with open('crawler/items.json', "r") as json_dump:
+        json_data = json.load(json_dump)
+
+    # Items with a "chapter_name" key came from TbcErrorItems.
+    a = [saved_data for saved_data in json_data if "chapter_name" in saved_data]
+    with open(join(dirname(abspath(dirname(__file__))), 'tbc_error_page/error.pickle'), "w+") as error_json:
+        cPickle.dump(a, error_json)
+
+    # Items with a "broken_url" key came from TbcBrokenItems.
+    b = [saved_data for saved_data in json_data if "broken_url" in saved_data]
+    with open(join(dirname(abspath(dirname(__file__))), 'tbc_error_page/broken.pickle'), "w+") as broken_json:
+        cPickle.dump(b, broken_json)
+
+except (IOError, ValueError):
+    print "Couldn't read crawler/items.json"