author    ttt 2017-05-13 00:29:47 +0530
committer ttt 2017-05-13 00:29:47 +0530
commit    4336f5f06f61de30ae3fa54650fce63a9d5ef5be (patch)
tree      23b4ee9b8e8f24bf732acf2f7ad22ed50cdd5670 /lib/python2.7/site-packages/django/http
added all server files
Diffstat (limited to 'lib/python2.7/site-packages/django/http')
 lib/python2.7/site-packages/django/http/__init__.py        |  10 +
 lib/python2.7/site-packages/django/http/cookie.py           |  86 +
 lib/python2.7/site-packages/django/http/multipartparser.py | 633 +
 lib/python2.7/site-packages/django/http/request.py         | 514 +
 lib/python2.7/site-packages/django/http/response.py        | 518 +
 lib/python2.7/site-packages/django/http/utils.py            |  96 +
 6 files changed, 1857 insertions(+), 0 deletions(-)
diff --git a/lib/python2.7/site-packages/django/http/__init__.py b/lib/python2.7/site-packages/django/http/__init__.py
new file mode 100644
index 0000000..46afa34
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/__init__.py
@@ -0,0 +1,10 @@
+from django.http.cookie import SimpleCookie, parse_cookie
+from django.http.request import (HttpRequest, QueryDict, UnreadablePostError,
+ build_request_repr)
+from django.http.response import (HttpResponse, StreamingHttpResponse,
+ CompatibleStreamingHttpResponse, HttpResponsePermanentRedirect,
+ HttpResponseRedirect, HttpResponseNotModified, HttpResponseBadRequest,
+ HttpResponseForbidden, HttpResponseNotFound, HttpResponseNotAllowed,
+ HttpResponseGone, HttpResponseServerError, Http404, BadHeaderError)
+from django.http.utils import (fix_location_header, conditional_content_removal,
+ fix_IE_for_attach, fix_IE_for_vary)
diff --git a/lib/python2.7/site-packages/django/http/cookie.py b/lib/python2.7/site-packages/django/http/cookie.py
new file mode 100644
index 0000000..b0b5350
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/cookie.py
@@ -0,0 +1,86 @@
+from __future__ import absolute_import, unicode_literals
+
+from django.utils.encoding import force_str
+from django.utils import six
+from django.utils.six.moves import http_cookies
+
+
+# Some versions of Python 2.7 and later won't need this encoding bug fix:
+_cookie_encodes_correctly = http_cookies.SimpleCookie().value_encode(';') == (';', '"\\073"')
+# See ticket #13007, http://bugs.python.org/issue2193 and http://trac.edgewall.org/ticket/2256
+_tc = http_cookies.SimpleCookie()
+try:
+ _tc.load(str('foo:bar=1'))
+ _cookie_allows_colon_in_names = True
+except http_cookies.CookieError:
+ _cookie_allows_colon_in_names = False
+
+if _cookie_encodes_correctly and _cookie_allows_colon_in_names:
+ SimpleCookie = http_cookies.SimpleCookie
+else:
+ Morsel = http_cookies.Morsel
+
+ class SimpleCookie(http_cookies.SimpleCookie):
+ if not _cookie_encodes_correctly:
+ def value_encode(self, val):
+ # Some browsers do not support quoted-string from RFC 2109,
+ # including some versions of Safari and Internet Explorer.
+ # These browsers split on ';', and some versions of Safari
+ # are known to split on ', '. Therefore, we encode ';' and ','
+
+ # SimpleCookie already does the hard work of encoding and decoding.
+ # It uses octal sequences like '\\012' for newline etc.
+ # and non-ASCII chars. We just make use of this mechanism, to
+ # avoid introducing two encoding schemes which would be confusing
+ # and especially awkward for javascript.
+
+ # NB, contrary to Python docs, value_encode returns a tuple containing
+ # (real val, encoded_val)
+ val, encoded = super(SimpleCookie, self).value_encode(val)
+
+ encoded = encoded.replace(";", "\\073").replace(",", "\\054")
+ # If encoded now contains any quoted chars, we need double quotes
+ # around the whole string.
+ if "\\" in encoded and not encoded.startswith('"'):
+ encoded = '"' + encoded + '"'
+
+ return val, encoded
+
+ if not _cookie_allows_colon_in_names:
+ def load(self, rawdata):
+ self.bad_cookies = set()
+ if six.PY2 and isinstance(rawdata, six.text_type):
+ rawdata = force_str(rawdata)
+ super(SimpleCookie, self).load(rawdata)
+ for key in self.bad_cookies:
+ del self[key]
+
+ # override private __set() method:
+ # (needed for using our Morsel, and for laxness with CookieError)
+ def _BaseCookie__set(self, key, real_value, coded_value):
+ key = force_str(key)
+ try:
+ M = self.get(key, Morsel())
+ M.set(key, real_value, coded_value)
+ dict.__setitem__(self, key, M)
+ except http_cookies.CookieError:
+ self.bad_cookies.add(key)
+ dict.__setitem__(self, key, http_cookies.Morsel())
+
+
+def parse_cookie(cookie):
+ if cookie == '':
+ return {}
+ if not isinstance(cookie, http_cookies.BaseCookie):
+ try:
+ c = SimpleCookie()
+ c.load(cookie)
+ except http_cookies.CookieError:
+ # Invalid cookie
+ return {}
+ else:
+ c = cookie
+ cookiedict = {}
+ for key in c.keys():
+ cookiedict[key] = c.get(key).value
+ return cookiedict
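
A minimal usage sketch of the helpers above (the cookie strings here are made
up; no Django settings are needed for this module):

    from django.http.cookie import SimpleCookie, parse_cookie

    print(parse_cookie('sessionid=abc123; csrftoken=xyz'))
    # {'sessionid': 'abc123', 'csrftoken': 'xyz'}

    c = SimpleCookie()
    c['flavor'] = 'choc;chip'
    # ';' is escaped to \073 so broken browsers don't split the value on it:
    print(c.output(header=''))   # flavor="choc\073chip"
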
diff --git a/lib/python2.7/site-packages/django/http/multipartparser.py b/lib/python2.7/site-packages/django/http/multipartparser.py
new file mode 100644
index 0000000..eeb435f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/multipartparser.py
@@ -0,0 +1,633 @@
+"""
+Multi-part parsing for file uploads.
+
+Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
+file upload handlers for processing.
+"""
+from __future__ import unicode_literals
+
+import base64
+import cgi
+import sys
+
+from django.conf import settings
+from django.core.exceptions import SuspiciousMultipartForm
+from django.utils.datastructures import MultiValueDict
+from django.utils.encoding import force_text
+from django.utils import six
+from django.utils.text import unescape_entities
+from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
+
+__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
+
+class MultiPartParserError(Exception):
+ pass
+
+class InputStreamExhausted(Exception):
+ """
+ No more reads are allowed from this device.
+ """
+ pass
+
+RAW = "raw"
+FILE = "file"
+FIELD = "field"
+
+class MultiPartParser(object):
+ """
+ A rfc2388 multipart/form-data parser.
+
+ The ``parse()`` method reads the input stream in ``chunk_size`` chunks
+ and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
+ """
+ def __init__(self, META, input_data, upload_handlers, encoding=None):
+ """
+ Initialize the MultiPartParser object.
+
+ :META:
+ The standard ``META`` dictionary in Django request objects.
+ :input_data:
+ The raw post data, as a file-like object.
+ :upload_handlers:
+ A list of UploadHandler instances that perform operations on the uploaded
+ data.
+ :encoding:
+ The encoding with which to treat the incoming data.
+ """
+
+ #
+ # Content-Type should contain multipart and the boundary information.
+ #
+
+ content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
+ if not content_type.startswith('multipart/'):
+ raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
+
+ # Parse the header to get the boundary to split the parts.
+ ctypes, opts = parse_header(content_type.encode('ascii'))
+ boundary = opts.get('boundary')
+ if not boundary or not cgi.valid_boundary(boundary):
+ raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
+
+ # Content-Length should contain the length of the body we are about
+ # to receive.
+ try:
+ content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
+ except (ValueError, TypeError):
+ content_length = 0
+
+ if content_length < 0:
+ # This means we shouldn't continue...raise an error.
+ raise MultiPartParserError("Invalid content length: %r" % content_length)
+
+ if isinstance(boundary, six.text_type):
+ boundary = boundary.encode('ascii')
+ self._boundary = boundary
+ self._input_data = input_data
+
+ # For compatibility with low-level network APIs (with 32-bit integers),
+ # the chunk size should be < 2^31, but still divisible by 4.
+ possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
+ self._chunk_size = min([2**31-4] + possible_sizes)
+
+ self._meta = META
+ self._encoding = encoding or settings.DEFAULT_CHARSET
+ self._content_length = content_length
+ self._upload_handlers = upload_handlers
+
+ def parse(self):
+ """
+ Parse the POST data and break it into a FILES MultiValueDict and a POST
+ MultiValueDict.
+
+ Returns a tuple containing the POST and FILES dictionary, respectively.
+ """
+ # We have to import QueryDict down here to avoid a circular import.
+ from django.http import QueryDict
+
+ encoding = self._encoding
+ handlers = self._upload_handlers
+
+ # The HTTP spec says that Content-Length >= 0 is valid, so handle
+ # content-length == 0 before continuing.
+ if self._content_length == 0:
+ return QueryDict('', encoding=self._encoding), MultiValueDict()
+
+ # See if any of the handlers take care of the parsing.
+ # This allows overriding everything if need be.
+ for handler in handlers:
+ result = handler.handle_raw_input(self._input_data,
+ self._meta,
+ self._content_length,
+ self._boundary,
+ encoding)
+ # Check to see if it was handled.
+ if result is not None:
+ return result[0], result[1]
+
+ # Create the data structures to be used later.
+ self._post = QueryDict('', mutable=True)
+ self._files = MultiValueDict()
+
+ # Instantiate the parser and stream:
+ stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
+
+ # Whether or not to signal a file-completion at the beginning of the loop.
+ old_field_name = None
+ counters = [0] * len(handlers)
+
+ try:
+ for item_type, meta_data, field_stream in Parser(stream, self._boundary):
+ if old_field_name:
+ # We run this at the beginning of the next loop
+ # since we cannot be sure a file is complete until
+ # we hit the next boundary/part of the multipart content.
+ self.handle_file_complete(old_field_name, counters)
+ old_field_name = None
+
+ try:
+ disposition = meta_data['content-disposition'][1]
+ field_name = disposition['name'].strip()
+ except (KeyError, IndexError, AttributeError):
+ continue
+
+ transfer_encoding = meta_data.get('content-transfer-encoding')
+ if transfer_encoding is not None:
+ transfer_encoding = transfer_encoding[0].strip()
+ field_name = force_text(field_name, encoding, errors='replace')
+
+ if item_type == FIELD:
+ # This is a post field, we can just set it in the post
+ if transfer_encoding == 'base64':
+ raw_data = field_stream.read()
+ try:
+ data = str(raw_data).decode('base64')
+ except Exception:
+ data = raw_data
+ else:
+ data = field_stream.read()
+
+ self._post.appendlist(field_name,
+ force_text(data, encoding, errors='replace'))
+ elif item_type == FILE:
+ # This is a file, use the handler...
+ file_name = disposition.get('filename')
+ if not file_name:
+ continue
+ file_name = force_text(file_name, encoding, errors='replace')
+ file_name = self.IE_sanitize(unescape_entities(file_name))
+
+ content_type = meta_data.get('content-type', ('',))[0].strip()
+ try:
+ charset = meta_data.get('content-type', (0, {}))[1].get('charset', None)
+ except Exception:
+ charset = None
+
+ try:
+ content_length = int(meta_data.get('content-length')[0])
+ except (IndexError, TypeError, ValueError):
+ content_length = None
+
+ counters = [0] * len(handlers)
+ try:
+ for handler in handlers:
+ try:
+ handler.new_file(field_name, file_name,
+ content_type, content_length,
+ charset)
+ except StopFutureHandlers:
+ break
+
+ for chunk in field_stream:
+ if transfer_encoding == 'base64':
+ # We only special-case base64 transfer encoding
+ # We should always read base64 streams in multiples of 4.
+ over_bytes = len(chunk) % 4
+ if over_bytes:
+ over_chunk = field_stream.read(4 - over_bytes)
+ chunk += over_chunk
+
+ try:
+ chunk = base64.b64decode(chunk)
+ except Exception as e:
+ # Since this is only a chunk, any error is an unfixable error.
+ msg = "Could not decode base64 data: %r" % e
+ six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
+
+ for i, handler in enumerate(handlers):
+ chunk_length = len(chunk)
+ chunk = handler.receive_data_chunk(chunk,
+ counters[i])
+ counters[i] += chunk_length
+ if chunk is None:
+ # If the handler returns None, stop feeding this chunk to the remaining handlers.
+ break
+
+ except SkipFile:
+ # Just use up the rest of this file...
+ exhaust(field_stream)
+ else:
+ # Handle file upload completions on next iteration.
+ old_field_name = field_name
+ else:
+ # If this is neither a FIELD nor a FILE, just exhaust the stream.
+ exhaust(stream)
+ except StopUpload as e:
+ if not e.connection_reset:
+ exhaust(self._input_data)
+ else:
+ # Make sure that the request data is all fed
+ exhaust(self._input_data)
+
+ # Signal that the upload has completed.
+ for handler in handlers:
+ retval = handler.upload_complete()
+ if retval:
+ break
+
+ return self._post, self._files
+
+ def handle_file_complete(self, old_field_name, counters):
+ """
+ Handle all the signalling that takes place when a file is complete.
+ """
+ for i, handler in enumerate(self._upload_handlers):
+ file_obj = handler.file_complete(counters[i])
+ if file_obj:
+ # If it returns a file object, then set the files dict.
+ self._files.appendlist(force_text(old_field_name,
+ self._encoding,
+ errors='replace'),
+ file_obj)
+ break
+
+ def IE_sanitize(self, filename):
+ """Cleanup filename from Internet Explorer full paths."""
+ return filename and filename[filename.rfind("\\")+1:].strip()
+
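
A hedged sketch of driving MultiPartParser directly -- normally request.POST
and request.FILES do this for you. It assumes a configured Django settings
module; the boundary and field name are made up:

    from io import BytesIO
    from django.core.files.uploadhandler import MemoryFileUploadHandler
    from django.http.multipartparser import MultiPartParser

    body = (b'--BoUnDaRy\r\n'
            b'Content-Disposition: form-data; name="title"\r\n'
            b'\r\n'
            b'hello\r\n'
            b'--BoUnDaRy--\r\n')
    meta = {'CONTENT_TYPE': 'multipart/form-data; boundary=BoUnDaRy',
            'CONTENT_LENGTH': str(len(body))}
    parser = MultiPartParser(meta, BytesIO(body), [MemoryFileUploadHandler()],
                             encoding='utf-8')
    post, files = parser.parse()
    print(post.getlist('title'))   # [u'hello']
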
+class LazyStream(six.Iterator):
+ """
+ The LazyStream wrapper allows one to get and "unget" bytes from a stream.
+
+ Given a producer object (an iterator that yields bytestrings), the
+ LazyStream object will support iteration, reading, and keeping a "look-back"
+ variable in case you need to "unget" some bytes.
+ """
+ def __init__(self, producer, length=None):
+ """
+ Every LazyStream must have a producer when instantiated.
+
+ A producer is an iterator that yields a bytestring each time it
+ is advanced.
+ """
+ self._producer = producer
+ self._empty = False
+ self._leftover = b''
+ self.length = length
+ self.position = 0
+ self._remaining = length
+ self._unget_history = []
+
+ def tell(self):
+ return self.position
+
+ def read(self, size=None):
+ def parts():
+ remaining = self._remaining if size is None else size
+ # do the whole thing in one shot if no limit was provided.
+ if remaining is None:
+ yield b''.join(self)
+ return
+
+ # otherwise do some bookkeeping to return exactly enough
+ # of the stream and stashing any extra content we get from
+ # the producer
+ while remaining != 0:
+ assert remaining > 0, 'remaining bytes to read should never go negative'
+
+ chunk = next(self)
+
+ emitting = chunk[:remaining]
+ self.unget(chunk[remaining:])
+ remaining -= len(emitting)
+ yield emitting
+
+ out = b''.join(parts())
+ return out
+
+ def __next__(self):
+ """
+ Used when the exact number of bytes to read is unimportant.
+
+ This procedure just returns whatever chunk is conveniently returned
+ from the iterator. Useful to avoid unnecessary bookkeeping if
+ performance is an issue.
+ """
+ if self._leftover:
+ output = self._leftover
+ self._leftover = b''
+ else:
+ output = next(self._producer)
+ self._unget_history = []
+ self.position += len(output)
+ return output
+
+ def close(self):
+ """
+ Used to invalidate/disable this lazy stream.
+
+ Replaces the producer with an empty list. Any leftover bytes that have
+ already been read will still be reported upon read() and/or next().
+ """
+ self._producer = []
+
+ def __iter__(self):
+ return self
+
+ def unget(self, bytes):
+ """
+ Places bytes back onto the front of the lazy stream.
+
+ Future calls to read() will return those bytes first. The
+ stream position and thus tell() will be rewound.
+ """
+ if not bytes:
+ return
+ self._update_unget_history(len(bytes))
+ self.position -= len(bytes)
+ self._leftover = b''.join([bytes, self._leftover])
+
+ def _update_unget_history(self, num_bytes):
+ """
+ Updates the unget history as a sanity check to see if we've pushed
+ back the same number of bytes in one chunk. If we keep ungetting the
+ same number of bytes many times (here, 50), we're most likely in an
+ infinite loop of some sort. This is usually caused by a
+ maliciously-malformed MIME request.
+ """
+ self._unget_history = [num_bytes] + self._unget_history[:49]
+ number_equal = len([current_number for current_number in self._unget_history
+ if current_number == num_bytes])
+
+ if number_equal > 40:
+ raise SuspiciousMultipartForm(
+ "The multipart parser got stuck, which shouldn't happen with"
+ " normal uploaded files. Check for malicious upload activity;"
+ " if there is none, report this to the Django developers."
+ )
+
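
LazyStream in isolation (a sketch; the producer here is just a list iterator):

    from django.http.multipartparser import LazyStream

    stream = LazyStream(iter([b'abcdef', b'ghij']))
    print(stream.read(4))    # 'abcd'
    stream.unget(b'cd')      # push two bytes back onto the front
    print(stream.read())     # 'cdefghij' -- leftover bytes are served first
    print(stream.tell())     # 10, the net number of bytes emitted so far
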
+class ChunkIter(six.Iterator):
+ """
+ An iterable that will yield chunks of data. Given a file-like object as the
+ constructor, this object will yield chunks of read operations from that
+ object.
+ """
+ def __init__(self, flo, chunk_size=64 * 1024):
+ self.flo = flo
+ self.chunk_size = chunk_size
+
+ def __next__(self):
+ try:
+ data = self.flo.read(self.chunk_size)
+ except InputStreamExhausted:
+ raise StopIteration()
+ if data:
+ return data
+ else:
+ raise StopIteration()
+
+ def __iter__(self):
+ return self
+
+class InterBoundaryIter(six.Iterator):
+ """
+ A Producer that will iterate over boundaries.
+ """
+ def __init__(self, stream, boundary):
+ self._stream = stream
+ self._boundary = boundary
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return LazyStream(BoundaryIter(self._stream, self._boundary))
+ except InputStreamExhausted:
+ raise StopIteration()
+
+class BoundaryIter(six.Iterator):
+ """
+ A Producer that is sensitive to boundaries.
+
+ Will happily yield bytes until a boundary is found. Will yield the bytes
+ before the boundary, throw away the boundary bytes themselves, and push the
+ post-boundary bytes back on the stream.
+
+ Future calls to next() after locating the boundary will raise a
+ StopIteration exception.
+ """
+
+ def __init__(self, stream, boundary):
+ self._stream = stream
+ self._boundary = boundary
+ self._done = False
+ # rollback an additional six bytes because the format is like
+ # this: CRLF<boundary>[--CRLF]
+ self._rollback = len(boundary) + 6
+
+ # Peek at one byte up front so an already-exhausted stream raises
+ # InputStreamExhausted immediately; the byte is pushed back below.
+ unused_char = self._stream.read(1)
+ if not unused_char:
+ raise InputStreamExhausted()
+ self._stream.unget(unused_char)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._done:
+ raise StopIteration()
+
+ stream = self._stream
+ rollback = self._rollback
+
+ bytes_read = 0
+ chunks = []
+ for bytes in stream:
+ bytes_read += len(bytes)
+ chunks.append(bytes)
+ if bytes_read > rollback:
+ break
+ if not bytes:
+ break
+ else:
+ self._done = True
+
+ if not chunks:
+ raise StopIteration()
+
+ chunk = b''.join(chunks)
+ boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
+
+ if boundary:
+ end, next = boundary
+ stream.unget(chunk[next:])
+ self._done = True
+ return chunk[:end]
+ else:
+ # make sure we don't treat a partial boundary (and
+ # its separators) as data
+ if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
+ # There's nothing left, we should just return and mark as done.
+ self._done = True
+ return chunk
+ else:
+ stream.unget(chunk[-rollback:])
+ return chunk[:-rollback]
+
+ def _find_boundary(self, data, eof=False):
+ """
+ Finds a multipart boundary in data.
+
+ If no boundary exists in the data, None is returned. Otherwise
+ a tuple containing the indices of the following are returned:
+
+ * the end of current encapsulation
+ * the start of the next encapsulation
+ """
+ index = data.find(self._boundary)
+ if index < 0:
+ return None
+ else:
+ end = index
+ next = index + len(self._boundary)
+ # back up over CRLF
+ last = max(0, end-1)
+ if data[last:last+1] == b'\n':
+ end -= 1
+ last = max(0, end-1)
+ if data[last:last+1] == b'\r':
+ end -= 1
+ return end, next
+
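
A sketch of the boundary machinery on a single in-memory stream (the boundary
and payload are made up):

    from django.http.multipartparser import BoundaryIter, LazyStream

    stream = LazyStream(iter([b'payload bytes\r\n--sep\r\ntrailing']))
    section = b''.join(BoundaryIter(stream, b'--sep'))
    print(repr(section))        # 'payload bytes' -- the CRLF before the boundary is dropped
    print(repr(stream.read()))  # '\r\ntrailing' was pushed back after the boundary
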
+def exhaust(stream_or_iterable):
+ """
+ Completely exhausts an iterator or stream.
+
+ Raise a MultiPartParserError if the argument is not a stream or an iterable.
+ """
+ iterator = None
+ try:
+ iterator = iter(stream_or_iterable)
+ except TypeError:
+ iterator = ChunkIter(stream_or_iterable, 16384)
+
+ if iterator is None:
+ raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
+
+ for __ in iterator:
+ pass
+
+def parse_boundary_stream(stream, max_header_size):
+ """
+ Parses one and exactly one stream that encapsulates a boundary.
+ """
+ # Stream at beginning of header, look for end of header
+ # and parse it if found. The header must fit within one
+ # chunk.
+ chunk = stream.read(max_header_size)
+
+ # 'find' returns the index of the first of these four bytes, so we'll
+ # need to munch them later to prevent them from polluting
+ # the payload.
+ header_end = chunk.find(b'\r\n\r\n')
+
+ def _parse_header(line):
+ main_value_pair, params = parse_header(line)
+ try:
+ name, value = main_value_pair.split(':', 1)
+ except ValueError:
+ raise ValueError("Invalid header: %r" % line)
+ return name, (value, params)
+
+ if header_end == -1:
+ # we find no header, so we just mark this fact and pass on
+ # the stream verbatim
+ stream.unget(chunk)
+ return (RAW, {}, stream)
+
+ header = chunk[:header_end]
+
+ # here we place any excess chunk back onto the stream, as
+ # well as throwing away the CRLFCRLF bytes from above.
+ stream.unget(chunk[header_end + 4:])
+
+ TYPE = RAW
+ outdict = {}
+
+ # Eliminate blank lines
+ for line in header.split(b'\r\n'):
+ # This terminology ("main value" and "dictionary of
+ # parameters") is from the Python docs.
+ try:
+ name, (value, params) = _parse_header(line)
+ except ValueError:
+ continue
+
+ if name == 'content-disposition':
+ TYPE = FIELD
+ if params.get('filename'):
+ TYPE = FILE
+
+ outdict[name] = value, params
+
+ if TYPE == RAW:
+ stream.unget(chunk)
+
+ return (TYPE, outdict, stream)
+
+class Parser(object):
+ def __init__(self, stream, boundary):
+ self._stream = stream
+ self._separator = b'--' + boundary
+
+ def __iter__(self):
+ boundarystream = InterBoundaryIter(self._stream, self._separator)
+ for sub_stream in boundarystream:
+ # Iterate over each part
+ yield parse_boundary_stream(sub_stream, 1024)
+
+def parse_header(line):
+ """ Parse the header into a key-value.
+ Input (line): bytes, output: unicode for key/name, bytes for value which
+ will be decoded later
+ """
+ plist = _parse_header_params(b';' + line)
+ key = plist.pop(0).lower().decode('ascii')
+ pdict = {}
+ for p in plist:
+ i = p.find(b'=')
+ if i >= 0:
+ name = p[:i].strip().lower().decode('ascii')
+ value = p[i+1:].strip()
+ if len(value) >= 2 and value[:1] == value[-1:] == b'"':
+ value = value[1:-1]
+ value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
+ pdict[name] = value
+ return key, pdict
+
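
parse_header on a typical multipart header line (a sketch; note the key keeps
the main value, and parameter values stay as bytes until decoded later):

    from django.http.multipartparser import parse_header

    key, params = parse_header(
        b'Content-Disposition: form-data; name="avatar"; filename="me.png"')
    print(key)      # 'content-disposition: form-data'
    print(params)   # {'name': 'avatar', 'filename': 'me.png'}
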
+def _parse_header_params(s):
+ plist = []
+ while s[:1] == b';':
+ s = s[1:]
+ end = s.find(b';')
+ while end > 0 and s.count(b'"', 0, end) % 2:
+ end = s.find(b';', end + 1)
+ if end < 0:
+ end = len(s)
+ f = s[:end]
+ plist.append(f.strip())
+ s = s[end:]
+ return plist
diff --git a/lib/python2.7/site-packages/django/http/request.py b/lib/python2.7/site-packages/django/http/request.py
new file mode 100644
index 0000000..dee6910
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/request.py
@@ -0,0 +1,514 @@
+from __future__ import absolute_import, unicode_literals
+
+import copy
+import os
+import re
+import sys
+from io import BytesIO
+from pprint import pformat
+
+from django.conf import settings
+from django.core import signing
+from django.core.exceptions import DisallowedHost, ImproperlyConfigured
+from django.core.files import uploadhandler
+from django.http.multipartparser import MultiPartParser
+from django.utils import six
+from django.utils.datastructures import MultiValueDict, ImmutableList
+from django.utils.encoding import force_bytes, force_text, force_str, iri_to_uri
+from django.utils.six.moves.urllib.parse import parse_qsl, urlencode, quote, urljoin
+
+
+RAISE_ERROR = object()
+absolute_http_url_re = re.compile(r"^https?://", re.I)
+host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
+
+
+class UnreadablePostError(IOError):
+ pass
+
+
+class HttpRequest(object):
+ """A basic HTTP request."""
+
+ # The encoding used in GET/POST dicts. None means use default setting.
+ _encoding = None
+ _upload_handlers = []
+
+ def __init__(self):
+ # WARNING: The `WSGIRequest` subclass doesn't call `super`.
+ # Any variable assignment made here should also happen in
+ # `WSGIRequest.__init__()`.
+
+ self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
+ self.path = ''
+ self.path_info = ''
+ self.method = None
+ self.resolver_match = None
+ self._post_parse_error = False
+
+ def __repr__(self):
+ return build_request_repr(self)
+
+ def get_host(self):
+ """Returns the HTTP host using the environment or request headers."""
+ # We try three options, in order of decreasing preference.
+ if settings.USE_X_FORWARDED_HOST and (
+ 'HTTP_X_FORWARDED_HOST' in self.META):
+ host = self.META['HTTP_X_FORWARDED_HOST']
+ elif 'HTTP_HOST' in self.META:
+ host = self.META['HTTP_HOST']
+ else:
+ # Reconstruct the host using the algorithm from PEP 333.
+ host = self.META['SERVER_NAME']
+ server_port = str(self.META['SERVER_PORT'])
+ if server_port != ('443' if self.is_secure() else '80'):
+ host = '%s:%s' % (host, server_port)
+
+ allowed_hosts = ['*'] if settings.DEBUG else settings.ALLOWED_HOSTS
+ domain, port = split_domain_port(host)
+ if domain and validate_host(domain, allowed_hosts):
+ return host
+ else:
+ msg = "Invalid HTTP_HOST header: %r." % host
+ if domain:
+ msg += "You may need to add %r to ALLOWED_HOSTS." % domain
+ raise DisallowedHost(msg)
+
+ def get_full_path(self):
+ # RFC 3986 requires query string arguments to be in the ASCII range.
+ # Rather than crash if this doesn't happen, we encode defensively.
+ query_string = self.META.get('QUERY_STRING', '')
+ return '%s%s' % (self.path,
+ ('?' + iri_to_uri(query_string)) if query_string else '')
+
+ def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
+ """
+ Attempts to return a signed cookie. If the signature fails or the
+ cookie has expired, raises an exception... unless you provide the
+ default argument in which case that value will be returned instead.
+ """
+ try:
+ cookie_value = self.COOKIES[key]
+ except KeyError:
+ if default is not RAISE_ERROR:
+ return default
+ else:
+ raise
+ try:
+ value = signing.get_cookie_signer(salt=key + salt).unsign(
+ cookie_value, max_age=max_age)
+ except signing.BadSignature:
+ if default is not RAISE_ERROR:
+ return default
+ else:
+ raise
+ return value
+
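
A sketch of the signed-cookie round trip between a response and a request
(assumes a configured settings module with a SECRET_KEY; names are made up):

    from django.http import HttpRequest, HttpResponse

    response = HttpResponse()
    response.set_signed_cookie('hint', 'abc', salt='hint-salt')

    request = HttpRequest()
    request.COOKIES['hint'] = response.cookies['hint'].value
    print(request.get_signed_cookie('hint', salt='hint-salt'))  # 'abc'
    print(request.get_signed_cookie('missing', default=None))   # None
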
+ def build_absolute_uri(self, location=None):
+ """
+ Builds an absolute URI from the location and the variables available in
+ this request. If no location is specified, the absolute URI is built on
+ ``request.get_full_path()``.
+ """
+ if not location:
+ location = self.get_full_path()
+ if not absolute_http_url_re.match(location):
+ current_uri = '%s://%s%s' % ('https' if self.is_secure() else 'http',
+ self.get_host(), self.path)
+ location = urljoin(current_uri, location)
+ return iri_to_uri(location)
+
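
A sketch of host handling and absolute URIs (assumes a configured settings
module where 'testserver' passes the ALLOWED_HOSTS check, e.g. DEBUG=True):

    from django.http import HttpRequest

    request = HttpRequest()
    request.META = {'HTTP_HOST': 'testserver',
                    'SERVER_NAME': 'testserver', 'SERVER_PORT': '80'}
    request.path = '/articles/2017/'
    print(request.get_host())                    # 'testserver'
    print(request.build_absolute_uri('edit/'))   # 'http://testserver/articles/2017/edit/'
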
+ def _is_secure(self):
+ return os.environ.get("HTTPS") == "on"
+
+ def is_secure(self):
+ # First, check the SECURE_PROXY_SSL_HEADER setting.
+ if settings.SECURE_PROXY_SSL_HEADER:
+ try:
+ header, value = settings.SECURE_PROXY_SSL_HEADER
+ except ValueError:
+ raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.')
+ if self.META.get(header, None) == value:
+ return True
+
+ # Failing that, fall back to _is_secure(), which is a hook for
+ # subclasses to implement.
+ return self._is_secure()
+
+ def is_ajax(self):
+ return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
+
+ @property
+ def encoding(self):
+ return self._encoding
+
+ @encoding.setter
+ def encoding(self, val):
+ """
+ Sets the encoding used for GET/POST accesses. If the GET or POST
+ dictionary has already been created, it is removed and recreated on the
+ next access (so that it is decoded correctly).
+ """
+ self._encoding = val
+ if hasattr(self, '_get'):
+ del self._get
+ if hasattr(self, '_post'):
+ del self._post
+
+ def _initialize_handlers(self):
+ self._upload_handlers = [uploadhandler.load_handler(handler, self)
+ for handler in settings.FILE_UPLOAD_HANDLERS]
+
+ @property
+ def upload_handlers(self):
+ if not self._upload_handlers:
+ # If there are no upload handlers defined, initialize them from settings.
+ self._initialize_handlers()
+ return self._upload_handlers
+
+ @upload_handlers.setter
+ def upload_handlers(self, upload_handlers):
+ if hasattr(self, '_files'):
+ raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
+ self._upload_handlers = upload_handlers
+
+ def parse_file_upload(self, META, post_data):
+ """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
+ self.upload_handlers = ImmutableList(
+ self.upload_handlers,
+ warning="You cannot alter upload handlers after the upload has been processed."
+ )
+ parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
+ return parser.parse()
+
+ @property
+ def body(self):
+ if not hasattr(self, '_body'):
+ if self._read_started:
+ raise Exception("You cannot access body after reading from request's data stream")
+ try:
+ self._body = self.read()
+ except IOError as e:
+ six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
+ self._stream = BytesIO(self._body)
+ return self._body
+
+ def _mark_post_parse_error(self):
+ self._post = QueryDict('')
+ self._files = MultiValueDict()
+ self._post_parse_error = True
+
+ def _load_post_and_files(self):
+ """Populate self._post and self._files if the content-type is a form type"""
+ if self.method != 'POST':
+ self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
+ return
+ if self._read_started and not hasattr(self, '_body'):
+ self._mark_post_parse_error()
+ return
+
+ if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
+ if hasattr(self, '_body'):
+ # Use already read data
+ data = BytesIO(self._body)
+ else:
+ data = self
+ try:
+ self._post, self._files = self.parse_file_upload(self.META, data)
+ except Exception:
+ # An error occurred while parsing POST data. Since when
+ # formatting the error the request handler might access
+ # self.POST, set self._post and self._files to prevent
+ # attempts to parse POST data again.
+ # Mark that an error occurred. This allows self.__repr__ to
+ # be explicit about it instead of simply representing an
+ # empty POST.
+ self._mark_post_parse_error()
+ raise
+ elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
+ self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
+ else:
+ self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
+
+ ## File-like and iterator interface.
+ ##
+ ## Expects self._stream to be set to an appropriate source of bytes by
+ ## a corresponding request subclass (e.g. WSGIRequest).
+ ## Also when request data has already been read by request.POST or
+ ## request.body, self._stream points to a BytesIO instance
+ ## containing that data.
+
+ def read(self, *args, **kwargs):
+ self._read_started = True
+ try:
+ return self._stream.read(*args, **kwargs)
+ except IOError as e:
+ six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
+
+ def readline(self, *args, **kwargs):
+ self._read_started = True
+ try:
+ return self._stream.readline(*args, **kwargs)
+ except IOError as e:
+ six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
+
+ def xreadlines(self):
+ while True:
+ buf = self.readline()
+ if not buf:
+ break
+ yield buf
+
+ __iter__ = xreadlines
+
+ def readlines(self):
+ return list(iter(self))
+
+
+class QueryDict(MultiValueDict):
+ """
+ A specialized MultiValueDict that takes a query string when initialized.
+ This is immutable unless you create a copy of it.
+
+ Values retrieved from this class are converted from the given encoding
+ (DEFAULT_CHARSET by default) to unicode.
+ """
+ # These are both reset in __init__, but are specified here at the class
+ # level so that unpickling will have valid values.
+ _mutable = True
+ _encoding = None
+
+ def __init__(self, query_string, mutable=False, encoding=None):
+ super(QueryDict, self).__init__()
+ if not encoding:
+ encoding = settings.DEFAULT_CHARSET
+ self.encoding = encoding
+ if six.PY3:
+ if isinstance(query_string, bytes):
+ # query_string contains URL-encoded data, a subset of ASCII.
+ query_string = query_string.decode()
+ for key, value in parse_qsl(query_string or '',
+ keep_blank_values=True,
+ encoding=encoding):
+ self.appendlist(key, value)
+ else:
+ for key, value in parse_qsl(query_string or '',
+ keep_blank_values=True):
+ self.appendlist(force_text(key, encoding, errors='replace'),
+ force_text(value, encoding, errors='replace'))
+ self._mutable = mutable
+
+ @property
+ def encoding(self):
+ if self._encoding is None:
+ self._encoding = settings.DEFAULT_CHARSET
+ return self._encoding
+
+ @encoding.setter
+ def encoding(self, value):
+ self._encoding = value
+
+ def _assert_mutable(self):
+ if not self._mutable:
+ raise AttributeError("This QueryDict instance is immutable")
+
+ def __setitem__(self, key, value):
+ self._assert_mutable()
+ key = bytes_to_text(key, self.encoding)
+ value = bytes_to_text(value, self.encoding)
+ super(QueryDict, self).__setitem__(key, value)
+
+ def __delitem__(self, key):
+ self._assert_mutable()
+ super(QueryDict, self).__delitem__(key)
+
+ def __copy__(self):
+ result = self.__class__('', mutable=True, encoding=self.encoding)
+ for key, value in six.iterlists(self):
+ result.setlist(key, value)
+ return result
+
+ def __deepcopy__(self, memo):
+ result = self.__class__('', mutable=True, encoding=self.encoding)
+ memo[id(self)] = result
+ for key, value in six.iterlists(self):
+ result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
+ return result
+
+ def setlist(self, key, list_):
+ self._assert_mutable()
+ key = bytes_to_text(key, self.encoding)
+ list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
+ super(QueryDict, self).setlist(key, list_)
+
+ def setlistdefault(self, key, default_list=None):
+ self._assert_mutable()
+ return super(QueryDict, self).setlistdefault(key, default_list)
+
+ def appendlist(self, key, value):
+ self._assert_mutable()
+ key = bytes_to_text(key, self.encoding)
+ value = bytes_to_text(value, self.encoding)
+ super(QueryDict, self).appendlist(key, value)
+
+ def pop(self, key, *args):
+ self._assert_mutable()
+ return super(QueryDict, self).pop(key, *args)
+
+ def popitem(self):
+ self._assert_mutable()
+ return super(QueryDict, self).popitem()
+
+ def clear(self):
+ self._assert_mutable()
+ super(QueryDict, self).clear()
+
+ def setdefault(self, key, default=None):
+ self._assert_mutable()
+ key = bytes_to_text(key, self.encoding)
+ default = bytes_to_text(default, self.encoding)
+ return super(QueryDict, self).setdefault(key, default)
+
+ def copy(self):
+ """Returns a mutable copy of this object."""
+ return self.__deepcopy__({})
+
+ def urlencode(self, safe=None):
+ """
+ Returns an encoded string of all query string arguments.
+
+ :arg safe: Used to specify characters which do not require quoting, for
+ example::
+
+ >>> q = QueryDict('', mutable=True)
+ >>> q['next'] = '/a&b/'
+ >>> q.urlencode()
+ 'next=%2Fa%26b%2F'
+ >>> q.urlencode(safe='/')
+ 'next=/a%26b/'
+
+ """
+ output = []
+ if safe:
+ safe = force_bytes(safe, self.encoding)
+ encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
+ else:
+ encode = lambda k, v: urlencode({k: v})
+ for k, list_ in self.lists():
+ k = force_bytes(k, self.encoding)
+ output.extend([encode(k, force_bytes(v, self.encoding))
+ for v in list_])
+ return '&'.join(output)
+
+
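
QueryDict in a nutshell (a sketch; instances are immutable unless created
mutable or copied):

    from django.http import QueryDict

    q = QueryDict('a=1&a=2&b=3', encoding='utf-8')
    print(q['a'])          # u'2' -- __getitem__ returns the last value
    print(q.getlist('a'))  # [u'1', u'2']

    q2 = q.copy()          # copy() returns a mutable deep copy
    q2['a'] = '9'
    print(q2.urlencode())  # 'a=9&b=3' (key order is not guaranteed)
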
+def build_request_repr(request, path_override=None, GET_override=None,
+ POST_override=None, COOKIES_override=None,
+ META_override=None):
+ """
+ Builds and returns the request's representation string. The request's
+ attributes may be overridden by pre-processed values.
+ """
+ # Since this is called as part of error handling, we need to be very
+ # robust against potentially malformed input.
+ try:
+ get = (pformat(GET_override)
+ if GET_override is not None
+ else pformat(request.GET))
+ except Exception:
+ get = '<could not parse>'
+ if request._post_parse_error:
+ post = '<could not parse>'
+ else:
+ try:
+ post = (pformat(POST_override)
+ if POST_override is not None
+ else pformat(request.POST))
+ except Exception:
+ post = '<could not parse>'
+ try:
+ cookies = (pformat(COOKIES_override)
+ if COOKIES_override is not None
+ else pformat(request.COOKIES))
+ except Exception:
+ cookies = '<could not parse>'
+ try:
+ meta = (pformat(META_override)
+ if META_override is not None
+ else pformat(request.META))
+ except Exception:
+ meta = '<could not parse>'
+ path = path_override if path_override is not None else request.path
+ return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
+ (request.__class__.__name__,
+ path,
+ six.text_type(get),
+ six.text_type(post),
+ six.text_type(cookies),
+ six.text_type(meta)))
+
+
+# It's neither necessary nor appropriate to use
+# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
+# this slightly more restricted function, used by QueryDict.
+def bytes_to_text(s, encoding):
+ """
+ Converts basestring objects to unicode, using the given encoding. Illegally
+ encoded input characters are replaced with Unicode "unknown" codepoint
+ (\ufffd).
+
+ Returns any non-basestring objects without change.
+ """
+ if isinstance(s, bytes):
+ return six.text_type(s, encoding, 'replace')
+ else:
+ return s
+
+
+def split_domain_port(host):
+ """
+ Return a (domain, port) tuple from a given host.
+
+ Returned domain is lower-cased. If the host is invalid, the domain will be
+ empty.
+ """
+ host = host.lower()
+
+ if not host_validation_re.match(host):
+ return '', ''
+
+ if host[-1] == ']':
+ # It's an IPv6 address without a port.
+ return host, ''
+ bits = host.rsplit(':', 1)
+ if len(bits) == 2:
+ return tuple(bits)
+ return bits[0], ''
+
+
+def validate_host(host, allowed_hosts):
+ """
+ Validate the given host for this site.
+
+ Check that the host looks valid and matches a host or host pattern in the
+ given list of ``allowed_hosts``. Any pattern beginning with a period
+ matches a domain and all its subdomains (e.g. ``.example.com`` matches
+ ``example.com`` and any subdomain), ``*`` matches anything, and anything
+ else must match exactly.
+
+ Note: This function assumes that the given host is lower-cased and has
+ already had the port, if any, stripped off.
+
+ Return ``True`` for a valid host, ``False`` otherwise.
+
+ """
+ for pattern in allowed_hosts:
+ pattern = pattern.lower()
+ match = (
+ pattern == '*' or
+ pattern.startswith('.') and (
+ host.endswith(pattern) or host == pattern[1:]
+ ) or
+ pattern == host
+ )
+ if match:
+ return True
+
+ return False
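
The host helpers are pure functions and easy to exercise directly (a sketch):

    from django.http.request import split_domain_port, validate_host

    print(split_domain_port('www.example.com:8000'))
    # ('www.example.com', '8000')
    print(validate_host('sub.example.com', ['.example.com']))  # True
    print(validate_host('example.com', ['.example.com']))      # True
    print(validate_host('evil.com', ['.example.com']))         # False
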
diff --git a/lib/python2.7/site-packages/django/http/response.py b/lib/python2.7/site-packages/django/http/response.py
new file mode 100644
index 0000000..60739ab
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/response.py
@@ -0,0 +1,518 @@
+from __future__ import absolute_import, unicode_literals
+
+import datetime
+import time
+import warnings
+from email.header import Header
+
+from django.conf import settings
+from django.core import signals
+from django.core import signing
+from django.core.exceptions import DisallowedRedirect
+from django.http.cookie import SimpleCookie
+from django.utils import six, timezone
+from django.utils.encoding import force_bytes, force_text, iri_to_uri
+from django.utils.http import cookie_date
+from django.utils.six.moves import map
+from django.utils.six.moves.urllib.parse import urlparse
+
+
+# See http://www.iana.org/assignments/http-status-codes
+REASON_PHRASES = {
+ 100: 'CONTINUE',
+ 101: 'SWITCHING PROTOCOLS',
+ 102: 'PROCESSING',
+ 200: 'OK',
+ 201: 'CREATED',
+ 202: 'ACCEPTED',
+ 203: 'NON-AUTHORITATIVE INFORMATION',
+ 204: 'NO CONTENT',
+ 205: 'RESET CONTENT',
+ 206: 'PARTIAL CONTENT',
+ 207: 'MULTI-STATUS',
+ 208: 'ALREADY REPORTED',
+ 226: 'IM USED',
+ 300: 'MULTIPLE CHOICES',
+ 301: 'MOVED PERMANENTLY',
+ 302: 'FOUND',
+ 303: 'SEE OTHER',
+ 304: 'NOT MODIFIED',
+ 305: 'USE PROXY',
+ 306: 'RESERVED',
+ 307: 'TEMPORARY REDIRECT',
+ 400: 'BAD REQUEST',
+ 401: 'UNAUTHORIZED',
+ 402: 'PAYMENT REQUIRED',
+ 403: 'FORBIDDEN',
+ 404: 'NOT FOUND',
+ 405: 'METHOD NOT ALLOWED',
+ 406: 'NOT ACCEPTABLE',
+ 407: 'PROXY AUTHENTICATION REQUIRED',
+ 408: 'REQUEST TIMEOUT',
+ 409: 'CONFLICT',
+ 410: 'GONE',
+ 411: 'LENGTH REQUIRED',
+ 412: 'PRECONDITION FAILED',
+ 413: 'REQUEST ENTITY TOO LARGE',
+ 414: 'REQUEST-URI TOO LONG',
+ 415: 'UNSUPPORTED MEDIA TYPE',
+ 416: 'REQUESTED RANGE NOT SATISFIABLE',
+ 417: 'EXPECTATION FAILED',
+ 418: "I'M A TEAPOT",
+ 422: 'UNPROCESSABLE ENTITY',
+ 423: 'LOCKED',
+ 424: 'FAILED DEPENDENCY',
+ 426: 'UPGRADE REQUIRED',
+ 428: 'PRECONDITION REQUIRED',
+ 429: 'TOO MANY REQUESTS',
+ 431: 'REQUEST HEADER FIELDS TOO LARGE',
+ 500: 'INTERNAL SERVER ERROR',
+ 501: 'NOT IMPLEMENTED',
+ 502: 'BAD GATEWAY',
+ 503: 'SERVICE UNAVAILABLE',
+ 504: 'GATEWAY TIMEOUT',
+ 505: 'HTTP VERSION NOT SUPPORTED',
+ 506: 'VARIANT ALSO NEGOTIATES',
+ 507: 'INSUFFICIENT STORAGE',
+ 508: 'LOOP DETECTED',
+ 510: 'NOT EXTENDED',
+ 511: 'NETWORK AUTHENTICATION REQUIRED',
+}
+
+
+class BadHeaderError(ValueError):
+ pass
+
+
+class HttpResponseBase(six.Iterator):
+ """
+ An HTTP response base class with dictionary-accessed headers.
+
+ This class doesn't handle content. It should not be used directly.
+ Use the HttpResponse and StreamingHttpResponse subclasses instead.
+ """
+
+ status_code = 200
+ reason_phrase = None # Use default reason phrase for status code.
+
+ def __init__(self, content_type=None, status=None, reason=None, mimetype=None):
+ # _headers is a mapping of the lower-case name to the original case of
+ # the header (required for working with legacy systems) and the header
+ # value. Both the name of the header and its value are ASCII strings.
+ self._headers = {}
+ self._charset = settings.DEFAULT_CHARSET
+ self._closable_objects = []
+ # This parameter is set by the handler. It's necessary to preserve the
+ # historical behavior of request_finished.
+ self._handler_class = None
+ if mimetype:
+ warnings.warn("Using mimetype keyword argument is deprecated, use"
+ " content_type instead",
+ DeprecationWarning, stacklevel=2)
+ content_type = mimetype
+ if not content_type:
+ content_type = "%s; charset=%s" % (settings.DEFAULT_CONTENT_TYPE,
+ self._charset)
+ self.cookies = SimpleCookie()
+ if status is not None:
+ self.status_code = status
+ if reason is not None:
+ self.reason_phrase = reason
+ elif self.reason_phrase is None:
+ self.reason_phrase = REASON_PHRASES.get(self.status_code,
+ 'UNKNOWN STATUS CODE')
+ self['Content-Type'] = content_type
+
+ def serialize_headers(self):
+ """HTTP headers as a bytestring."""
+ def to_bytes(val, encoding):
+ return val if isinstance(val, bytes) else val.encode(encoding)
+
+ headers = [
+ (b': '.join([to_bytes(key, 'ascii'), to_bytes(value, 'latin-1')]))
+ for key, value in self._headers.values()
+ ]
+ return b'\r\n'.join(headers)
+
+ if six.PY3:
+ __bytes__ = serialize_headers
+ else:
+ __str__ = serialize_headers
+
+ def _convert_to_charset(self, value, charset, mime_encode=False):
+ """Converts headers key/value to ascii/latin-1 native strings.
+
+ `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and
+ `value` can't be represented in the given charset, MIME-encoding
+ is applied.
+ """
+ if not isinstance(value, (bytes, six.text_type)):
+ value = str(value)
+ try:
+ if six.PY3:
+ if isinstance(value, str):
+ # Ensure string is valid in given charset
+ value.encode(charset)
+ else:
+ # Convert bytestring using given charset
+ value = value.decode(charset)
+ else:
+ if isinstance(value, str):
+ # Ensure string is valid in given charset
+ value.decode(charset)
+ else:
+ # Convert unicode string to given charset
+ value = value.encode(charset)
+ except UnicodeError as e:
+ if mime_encode:
+ # Wrapping in str() is a workaround for #12422 under Python 2.
+ value = str(Header(value, 'utf-8').encode())
+ else:
+ e.reason += ', HTTP response headers must be in %s format' % charset
+ raise
+ if str('\n') in value or str('\r') in value:
+ raise BadHeaderError("Header values can't contain newlines (got %r)" % value)
+ return value
+
+ def __setitem__(self, header, value):
+ header = self._convert_to_charset(header, 'ascii')
+ value = self._convert_to_charset(value, 'latin-1', mime_encode=True)
+ self._headers[header.lower()] = (header, value)
+
+ def __delitem__(self, header):
+ try:
+ del self._headers[header.lower()]
+ except KeyError:
+ pass
+
+ def __getitem__(self, header):
+ return self._headers[header.lower()][1]
+
+ def __getstate__(self):
+ # SimpleCookie is not picklable with pickle.HIGHEST_PROTOCOL, so we
+ # serialise to a string instead
+ state = self.__dict__.copy()
+ state['cookies'] = str(state['cookies'])
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.cookies = SimpleCookie(self.cookies)
+
+ def has_header(self, header):
+ """Case-insensitive check for a header."""
+ return header.lower() in self._headers
+
+ __contains__ = has_header
+
+ def items(self):
+ return self._headers.values()
+
+ def get(self, header, alternate=None):
+ return self._headers.get(header.lower(), (None, alternate))[1]
+
+ def set_cookie(self, key, value='', max_age=None, expires=None, path='/',
+ domain=None, secure=False, httponly=False):
+ """
+ Sets a cookie.
+
+ ``expires`` can be:
+ - a string in the correct format,
+ - a naive ``datetime.datetime`` object in UTC,
+ - an aware ``datetime.datetime`` object in any time zone.
+ If it is a ``datetime.datetime`` object then ``max_age`` will be calculated.
+
+ """
+ self.cookies[key] = value
+ if expires is not None:
+ if isinstance(expires, datetime.datetime):
+ if timezone.is_aware(expires):
+ expires = timezone.make_naive(expires, timezone.utc)
+ delta = expires - datetime.datetime.utcnow()
+ # Add one second so the date matches exactly (a fraction of
+ # time gets lost between converting to a timedelta and
+ # then the date string).
+ delta = delta + datetime.timedelta(seconds=1)
+ # Just set max_age - the max_age logic will set expires.
+ expires = None
+ max_age = max(0, delta.days * 86400 + delta.seconds)
+ else:
+ self.cookies[key]['expires'] = expires
+ if max_age is not None:
+ self.cookies[key]['max-age'] = max_age
+ # IE requires expires, so set it if it hasn't been set already.
+ if not expires:
+ self.cookies[key]['expires'] = cookie_date(time.time() +
+ max_age)
+ if path is not None:
+ self.cookies[key]['path'] = path
+ if domain is not None:
+ self.cookies[key]['domain'] = domain
+ if secure:
+ self.cookies[key]['secure'] = True
+ if httponly:
+ self.cookies[key]['httponly'] = True
+
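
A sketch of set_cookie (assumes a configured settings module; names are made
up). Passing only max_age also populates 'expires', per the IE note above:

    from django.http import HttpResponse

    response = HttpResponse('ok')
    response.set_cookie('lang', 'en', max_age=3600, httponly=True)
    morsel = response.cookies['lang']
    print(morsel['max-age'])        # 3600
    print(bool(morsel['expires']))  # True -- filled in from max_age
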
+ def set_signed_cookie(self, key, value, salt='', **kwargs):
+ value = signing.get_cookie_signer(salt=key + salt).sign(value)
+ return self.set_cookie(key, value, **kwargs)
+
+ def delete_cookie(self, key, path='/', domain=None):
+ self.set_cookie(key, max_age=0, path=path, domain=domain,
+ expires='Thu, 01-Jan-1970 00:00:00 GMT')
+
+ # Common methods used by subclasses
+
+ def make_bytes(self, value):
+ """Turn a value into a bytestring encoded in the output charset."""
+ # Per PEP 3333, this response body must be bytes. To avoid returning
+ # an instance of a subclass, this function returns `bytes(value)`.
+ # This doesn't make a copy when `value` already contains bytes.
+
+ # If content is already encoded (eg. gzip), assume bytes.
+ if self.has_header('Content-Encoding'):
+ return bytes(value)
+
+ # Handle string types -- we can't rely on force_bytes here because:
+ # - under Python 3 it attempts str conversion first
+ # - when self._charset != 'utf-8' it re-encodes the content
+ if isinstance(value, bytes):
+ return bytes(value)
+ if isinstance(value, six.text_type):
+ return bytes(value.encode(self._charset))
+
+ # Handle non-string types (#16494)
+ return force_bytes(value, self._charset)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ # Subclasses must define self._iterator for this function.
+ return self.make_bytes(next(self._iterator))
+
+ # These methods partially implement the file-like object interface.
+ # See http://docs.python.org/lib/bltin-file-objects.html
+
+ # The WSGI server must call this method upon completion of the request.
+ # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
+ def close(self):
+ for closable in self._closable_objects:
+ try:
+ closable.close()
+ except Exception:
+ pass
+ signals.request_finished.send(sender=self._handler_class)
+
+ def write(self, content):
+ raise Exception("This %s instance is not writable" % self.__class__.__name__)
+
+ def flush(self):
+ pass
+
+ def tell(self):
+ raise Exception("This %s instance cannot tell its position" % self.__class__.__name__)
+
+
+class HttpResponse(HttpResponseBase):
+ """
+ An HTTP response class with a string as content.
+
+ This content can be read, appended to, or replaced.
+ """
+
+ streaming = False
+
+ def __init__(self, content=b'', *args, **kwargs):
+ super(HttpResponse, self).__init__(*args, **kwargs)
+ # Content is a bytestring. See the `content` property methods.
+ self.content = content
+
+ def serialize(self):
+ """Full HTTP message, including headers, as a bytestring."""
+ return self.serialize_headers() + b'\r\n\r\n' + self.content
+
+ if six.PY3:
+ __bytes__ = serialize
+ else:
+ __str__ = serialize
+
+ def _consume_content(self):
+ # If the response was instantiated with an iterator, when its content
+ # is accessed, the iterator is going to be exhausted and the content
+ # loaded in memory. At this point, it's better to abandon the original
+ # iterator and save the content for later reuse. This is a temporary
+ # solution. See the comment in __iter__ below for the long term plan.
+ if self._base_content_is_iter:
+ self.content = b''.join(self.make_bytes(e) for e in self._container)
+
+ @property
+ def content(self):
+ self._consume_content()
+ return b''.join(self.make_bytes(e) for e in self._container)
+
+ @content.setter
+ def content(self, value):
+ if hasattr(value, '__iter__') and not isinstance(value, (bytes, six.string_types)):
+ self._container = value
+ self._base_content_is_iter = True
+ if hasattr(value, 'close'):
+ self._closable_objects.append(value)
+ else:
+ self._container = [value]
+ self._base_content_is_iter = False
+
+ def __iter__(self):
+ # Raise a deprecation warning only if the content wasn't consumed yet,
+ # because the response may be intended to be streamed.
+ # Once the deprecation completes, iterators should be consumed upon
+ # assignment rather than upon access. The _consume_content method
+ # should be removed. See #6527.
+ if self._base_content_is_iter:
+ warnings.warn(
+ 'Creating streaming responses with `HttpResponse` is '
+ 'deprecated. Use `StreamingHttpResponse` instead '
+ 'if you need the streaming behavior.',
+ DeprecationWarning, stacklevel=2)
+ if not hasattr(self, '_iterator'):
+ self._iterator = iter(self._container)
+ return self
+
+ def write(self, content):
+ self._consume_content()
+ self._container.append(content)
+
+ def tell(self):
+ self._consume_content()
+ return len(self.content)
+
+
+class StreamingHttpResponse(HttpResponseBase):
+ """
+ A streaming HTTP response class with an iterator as content.
+
+ This should only be iterated once, when the response is streamed to the
+ client. However, it can be appended to or replaced with a new iterator
+ that wraps the original content (or yields entirely new content).
+ """
+
+ streaming = True
+
+ def __init__(self, streaming_content=(), *args, **kwargs):
+ super(StreamingHttpResponse, self).__init__(*args, **kwargs)
+ # `streaming_content` should be an iterable of bytestrings.
+ # See the `streaming_content` property methods.
+ self.streaming_content = streaming_content
+
+ @property
+ def content(self):
+ raise AttributeError("This %s instance has no `content` attribute. "
+ "Use `streaming_content` instead." % self.__class__.__name__)
+
+ @property
+ def streaming_content(self):
+ return map(self.make_bytes, self._iterator)
+
+ @streaming_content.setter
+ def streaming_content(self, value):
+ # Ensure we can never iterate on "value" more than once.
+ self._iterator = iter(value)
+ if hasattr(value, 'close'):
+ self._closable_objects.append(value)
+
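
Streaming a generator (a sketch; assumes a configured settings module):

    from django.http import StreamingHttpResponse

    def rows():
        for i in range(3):
            yield 'row %d\n' % i   # text chunks are encoded by make_bytes()

    response = StreamingHttpResponse(rows())
    print(repr(b''.join(response.streaming_content)))
    # 'row 0\nrow 1\nrow 2\n'; accessing response.content would raise
    # AttributeError, since this class streams only.
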
+
+class CompatibleStreamingHttpResponse(StreamingHttpResponse):
+ """
+ This class maintains compatibility with middleware that doesn't know how
+ to handle the content of a streaming response by exposing a `content`
+ attribute that will consume and cache the content iterator when accessed.
+
+ These responses will stream only if no middleware attempts to access the
+ `content` attribute. Otherwise, they will behave like a regular response,
+ and raise a `DeprecationWarning`.
+ """
+ @property
+ def content(self):
+ warnings.warn(
+ 'Accessing the `content` attribute on a streaming response is '
+ 'deprecated. Use the `streaming_content` attribute instead.',
+ DeprecationWarning, stacklevel=2)
+ content = b''.join(self)
+ self.streaming_content = [content]
+ return content
+
+ @content.setter
+ def content(self, content):
+ warnings.warn(
+ 'Accessing the `content` attribute on a streaming response is '
+ 'deprecated. Use the `streaming_content` attribute instead.',
+ DeprecationWarning, stacklevel=2)
+ self.streaming_content = [content]
+
+
+class HttpResponseRedirectBase(HttpResponse):
+ allowed_schemes = ['http', 'https', 'ftp']
+
+ def __init__(self, redirect_to, *args, **kwargs):
+ parsed = urlparse(force_text(redirect_to))
+ if parsed.scheme and parsed.scheme not in self.allowed_schemes:
+ raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme)
+ super(HttpResponseRedirectBase, self).__init__(*args, **kwargs)
+ self['Location'] = iri_to_uri(redirect_to)
+
+ url = property(lambda self: self['Location'])
+
+
+class HttpResponseRedirect(HttpResponseRedirectBase):
+ status_code = 302
+
+
+class HttpResponsePermanentRedirect(HttpResponseRedirectBase):
+ status_code = 301
+
+
+class HttpResponseNotModified(HttpResponse):
+ status_code = 304
+
+ def __init__(self, *args, **kwargs):
+ super(HttpResponseNotModified, self).__init__(*args, **kwargs)
+ del self['content-type']
+
+ @HttpResponse.content.setter
+ def content(self, value):
+ if value:
+ raise AttributeError("You cannot set content to a 304 (Not Modified) response")
+ self._container = []
+ self._base_content_is_iter = False
+
+
+class HttpResponseBadRequest(HttpResponse):
+ status_code = 400
+
+
+class HttpResponseNotFound(HttpResponse):
+ status_code = 404
+
+
+class HttpResponseForbidden(HttpResponse):
+ status_code = 403
+
+
+class HttpResponseNotAllowed(HttpResponse):
+ status_code = 405
+
+ def __init__(self, permitted_methods, *args, **kwargs):
+ super(HttpResponseNotAllowed, self).__init__(*args, **kwargs)
+ self['Allow'] = ', '.join(permitted_methods)
+
+
+class HttpResponseGone(HttpResponse):
+ status_code = 410
+
+
+class HttpResponseServerError(HttpResponse):
+ status_code = 500
+
+
+class Http404(Exception):
+ pass
diff --git a/lib/python2.7/site-packages/django/http/utils.py b/lib/python2.7/site-packages/django/http/utils.py
new file mode 100644
index 0000000..e13dc4c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/http/utils.py
@@ -0,0 +1,96 @@
+"""
+Functions that modify an HTTP request or response in some way.
+"""
+
+# This group of functions is run as part of the response handling, after
+# everything else, including all response middleware. Think of them as
+# "compulsory response middleware". Be careful about what goes here, because
+# it's a little fiddly to override this behavior, so they should be truly
+# universally applicable.
+
+
+def fix_location_header(request, response):
+ """
+ Ensures that we always use an absolute URI in any location header in the
+ response. This is required by RFC 2616, section 14.30.
+
+ Code constructing response objects is free to insert relative paths, as
+ this function converts them to absolute paths.
+ """
+ if 'Location' in response and request.get_host():
+ response['Location'] = request.build_absolute_uri(response['Location'])
+ return response
+
+
+def conditional_content_removal(request, response):
+ """
+ Removes the content of responses to HEAD requests and of 1xx, 204 and
+ 304 responses. Ensures compliance with RFC 2616, section 4.3.
+ """
+ if 100 <= response.status_code < 200 or response.status_code in (204, 304):
+ if response.streaming:
+ response.streaming_content = []
+ else:
+ response.content = b''
+ response['Content-Length'] = '0'
+ if request.method == 'HEAD':
+ if response.streaming:
+ response.streaming_content = []
+ else:
+ response.content = b''
+ return response
+
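
A sketch of conditional_content_removal with a hypothetical stand-in request
(assumes a configured settings module; FakeRequest is made up for illustration):

    from django.http import HttpResponse
    from django.http.utils import conditional_content_removal

    class FakeRequest(object):
        method = 'HEAD'

    response = conditional_content_removal(FakeRequest(), HttpResponse('body'))
    print(repr(response.content))   # '' -- HEAD responses carry no body
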
+
+def fix_IE_for_attach(request, response):
+ """
+ IE refuses to download an attachment that it has been told not to cache.
+ When the browser is IE, this strips the Pragma header and the 'no-cache' /
+ 'no-store' Cache-Control values from responses with a Content-Disposition.
+ """
+ useragent = request.META.get('HTTP_USER_AGENT', '').upper()
+ if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
+ return response
+
+ offending_headers = ('no-cache', 'no-store')
+ if response.has_header('Content-Disposition'):
+ try:
+ del response['Pragma']
+ except KeyError:
+ pass
+ if response.has_header('Cache-Control'):
+ cache_control_values = [value.strip() for value in
+ response['Cache-Control'].split(',')
+ if value.strip().lower() not in offending_headers]
+
+ if not cache_control_values:
+ del response['Cache-Control']
+ else:
+ response['Cache-Control'] = ', '.join(cache_control_values)
+
+ return response
+
+
+def fix_IE_for_vary(request, response):
+ """
+ This function will fix the bug reported at
+ http://support.microsoft.com/kb/824847/en-us?spid=8722&sid=global
+ by clearing the Vary header whenever the mime-type is not safe
+ enough for Internet Explorer to handle. Poor thing.
+ """
+ useragent = request.META.get('HTTP_USER_AGENT', '').upper()
+ if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
+ return response
+
+ # These are the MIME types decreed "Vary-safe" for IE:
+ safe_mime_types = ('text/html', 'text/plain', 'text/sgml')
+
+ # The first part of the Content-Type field will be the MIME type,
+ # everything after ';', such as character-set, can be ignored.
+ mime_type = response.get('Content-Type', '').partition(';')[0]
+ if mime_type not in safe_mime_types:
+ try:
+ del response['Vary']
+ except KeyError:
+ pass
+
+ return response