summaryrefslogtreecommitdiff
path: root/lib/python2.7/site-packages/django/utils
diff options
context:
space:
mode:
Diffstat (limited to 'lib/python2.7/site-packages/django/utils')
-rw-r--r--lib/python2.7/site-packages/django/utils/2to3_fixes/__init__.py0
-rw-r--r--lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py36
-rw-r--r--lib/python2.7/site-packages/django/utils/__init__.py0
-rw-r--r--lib/python2.7/site-packages/django/utils/_os.py98
-rw-r--r--lib/python2.7/site-packages/django/utils/archive.py215
-rw-r--r--lib/python2.7/site-packages/django/utils/autoreload.py182
-rw-r--r--lib/python2.7/site-packages/django/utils/baseconv.py99
-rw-r--r--lib/python2.7/site-packages/django/utils/cache.py264
-rw-r--r--lib/python2.7/site-packages/django/utils/checksums.py24
-rw-r--r--lib/python2.7/site-packages/django/utils/crypto.py166
-rw-r--r--lib/python2.7/site-packages/django/utils/daemonize.py58
-rw-r--r--lib/python2.7/site-packages/django/utils/datastructures.py524
-rw-r--r--lib/python2.7/site-packages/django/utils/dateformat.py317
-rw-r--r--lib/python2.7/site-packages/django/utils/dateparse.py82
-rw-r--r--lib/python2.7/site-packages/django/utils/dates.py57
-rw-r--r--lib/python2.7/site-packages/django/utils/datetime_safe.py92
-rw-r--r--lib/python2.7/site-packages/django/utils/decorators.py120
-rw-r--r--lib/python2.7/site-packages/django/utils/deprecation.py62
-rw-r--r--lib/python2.7/site-packages/django/utils/dictconfig.py555
-rw-r--r--lib/python2.7/site-packages/django/utils/encoding.py253
-rw-r--r--lib/python2.7/site-packages/django/utils/feedgenerator.py391
-rw-r--r--lib/python2.7/site-packages/django/utils/formats.py209
-rw-r--r--lib/python2.7/site-packages/django/utils/functional.py430
-rw-r--r--lib/python2.7/site-packages/django/utils/html.py330
-rw-r--r--lib/python2.7/site-packages/django/utils/html_parser.py113
-rw-r--r--lib/python2.7/site-packages/django/utils/http.py261
-rw-r--r--lib/python2.7/site-packages/django/utils/image.py154
-rw-r--r--lib/python2.7/site-packages/django/utils/importlib.py41
-rw-r--r--lib/python2.7/site-packages/django/utils/ipv6.py268
-rw-r--r--lib/python2.7/site-packages/django/utils/itercompat.py36
-rw-r--r--lib/python2.7/site-packages/django/utils/jslex.py219
-rw-r--r--lib/python2.7/site-packages/django/utils/log.py160
-rw-r--r--lib/python2.7/site-packages/django/utils/module_loading.py98
-rw-r--r--lib/python2.7/site-packages/django/utils/numberformat.py48
-rw-r--r--lib/python2.7/site-packages/django/utils/regex_helper.py342
-rw-r--r--lib/python2.7/site-packages/django/utils/safestring.py134
-rw-r--r--lib/python2.7/site-packages/django/utils/simplejson.py31
-rw-r--r--lib/python2.7/site-packages/django/utils/six.py676
-rw-r--r--lib/python2.7/site-packages/django/utils/synch.py93
-rw-r--r--lib/python2.7/site-packages/django/utils/termcolors.py200
-rw-r--r--lib/python2.7/site-packages/django/utils/text.py412
-rw-r--r--lib/python2.7/site-packages/django/utils/timesince.py64
-rw-r--r--lib/python2.7/site-packages/django/utils/timezone.py317
-rw-r--r--lib/python2.7/site-packages/django/utils/translation/__init__.py196
-rw-r--r--lib/python2.7/site-packages/django/utils/translation/trans_null.py63
-rw-r--r--lib/python2.7/site-packages/django/utils/translation/trans_real.py676
-rw-r--r--lib/python2.7/site-packages/django/utils/tree.py136
-rw-r--r--lib/python2.7/site-packages/django/utils/tzinfo.py100
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/__init__.py80
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/__main__.py10
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/case.py1076
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/collector.py9
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/compatibility.py64
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/loader.py322
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/main.py241
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/result.py183
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/runner.py206
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/signals.py57
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/suite.py287
-rw-r--r--lib/python2.7/site-packages/django/utils/unittest/util.py99
-rw-r--r--lib/python2.7/site-packages/django/utils/version.py51
-rw-r--r--lib/python2.7/site-packages/django/utils/xmlutils.py14
62 files changed, 12101 insertions, 0 deletions
diff --git a/lib/python2.7/site-packages/django/utils/2to3_fixes/__init__.py b/lib/python2.7/site-packages/django/utils/2to3_fixes/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/2to3_fixes/__init__.py
diff --git a/lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py b/lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py
new file mode 100644
index 0000000..613734c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/2to3_fixes/fix_unicode.py
@@ -0,0 +1,36 @@
+"""Fixer for __unicode__ methods.
+
+Uses the django.utils.encoding.python_2_unicode_compatible decorator.
+"""
+
+from __future__ import unicode_literals
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import find_indentation, Name, syms, touch_import
+from lib2to3.pgen2 import token
+from lib2to3.pytree import Leaf, Node
+
+
class FixUnicode(fixer_base.BaseFix):
    """lib2to3 fixer: replace a class's ``__unicode__`` method with
    ``__str__`` and wrap the class in the
    ``django.utils.encoding.python_2_unicode_compatible`` decorator.
    """

    # Opt in to the faster "bottom matcher" pattern engine.
    BM_compatible = True
    # Matches any classdef whose suite contains a __unicode__(self) method.
    PATTERN = """
    classdef< 'class' any+ ':'
        suite< any*
            funcdef< 'def' unifunc='__unicode__'
                     parameters< '(' NAME ')' > any+ >
            any* > >
    """

    def transform(self, node, results):
        # Rename __unicode__ -> __str__ in place, keeping the whitespace
        # (prefix) that preceded the original name.
        unifunc = results["unifunc"]
        strfunc = Name("__str__", prefix=unifunc.prefix)
        unifunc.replace(strfunc)

        # Clone the (already modified) classdef and rebuild the node as
        # "@python_2_unicode_compatible" followed by the class, preserving
        # the original node's leading whitespace/comments via prefix.
        klass = node.clone()
        klass.prefix = '\n' + find_indentation(node)
        decorator = Node(syms.decorator, [Leaf(token.AT, "@"), Name('python_2_unicode_compatible')])
        decorated = Node(syms.decorated, [decorator, klass], prefix=node.prefix)
        node.replace(decorated)

        # Ensure the decorator is importable in the rewritten module.
        touch_import('django.utils.encoding', 'python_2_unicode_compatible', decorated)
diff --git a/lib/python2.7/site-packages/django/utils/__init__.py b/lib/python2.7/site-packages/django/utils/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/__init__.py
diff --git a/lib/python2.7/site-packages/django/utils/_os.py b/lib/python2.7/site-packages/django/utils/_os.py
new file mode 100644
index 0000000..3e60c10
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/_os.py
@@ -0,0 +1,98 @@
+import os
+import stat
+import sys
+from os.path import join, normcase, normpath, abspath, isabs, sep, dirname
+
+from django.utils.encoding import force_text
+from django.utils import six
+
# On Windows the builtin WindowsError exists; elsewhere define a stub so
# that "except WindowsError" clauses below are valid on every platform.
try:
    WindowsError = WindowsError
except NameError:
    class WindowsError(Exception):
        pass

if six.PY2:
    # Encoding used to decode/encode filesystem paths on Python 2.
    fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()


# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
    abspathu = abspath
else:
    def abspathu(path):
        """
        Version of os.path.abspath that uses the unicode representation
        of the current working directory, thus avoiding a UnicodeDecodeError
        in join when the cwd has non-ASCII characters.
        """
        if not isabs(path):
            path = join(os.getcwdu(), path)
        return normpath(path)
+
def upath(path):
    """
    Always return a unicode version of *path* (decoding Python 2
    bytestrings with the filesystem encoding).
    """
    needs_decoding = six.PY2 and not isinstance(path, six.text_type)
    return path.decode(fs_encoding) if needs_decoding else path
+
def npath(path):
    """
    Always return a native path, that is unicode on Python 3 and a
    bytestring (encoded with the filesystem encoding) on Python 2.
    """
    needs_encoding = six.PY2 and not isinstance(path, bytes)
    return path.encode(fs_encoding) if needs_encoding else path
+
def safe_join(base, *paths):
    """
    Joins one or more path components to the base path component intelligently.
    Returns a normalized, absolute version of the final path.

    The final path must be located inside of the base path component (otherwise
    a ValueError is raised). This guards against directory-traversal via
    components like "..".
    """
    base = force_text(base)
    paths = [force_text(p) for p in paths]
    final_path = abspathu(join(base, *paths))
    base_path = abspathu(base)
    # Ensure final_path starts with base_path (using normcase to ensure we
    # don't false-negative on case insensitive operating systems like Windows),
    # further, one of the following conditions must be true:
    #  a) The next character is the path separator (to prevent conditions like
    #     safe_join("/dir", "/../d"))
    #  b) The final path must be the same as the base path.
    #  c) The base path must be the most root path (meaning either "/" or "C:\\")
    #     -- detected below by dirname(base) == base.
    if (not normcase(final_path).startswith(normcase(base_path + sep)) and
        normcase(final_path) != normcase(base_path) and
        dirname(normcase(base_path)) != normcase(base_path)):
        raise ValueError('The joined path (%s) is located outside of the base '
                         'path component (%s)' % (final_path, base_path))
    return final_path
+
+
def rmtree_errorhandler(func, path, exc_info):
    """
    Error handler for shutil.rmtree on Windows.

    Read-only files (e.g. in .svn directories) make rmtree() raise an
    "Access is denied" WindowsError; clear the read-only bit and retry the
    failed operation once. Any other error is re-raised.
    """
    exctype, value = exc_info[:2]
    # Only the Windows "Access is denied" case is handled here.
    if not (exctype is WindowsError and 'Access is denied' in str(value)):
        raise
    # The file is expected to currently be read-only.
    if not (os.stat(path).st_mode & stat.S_IREAD):
        raise
    # Make it writable, then retry the original failed operation.
    os.chmod(path, stat.S_IWRITE)
    func(path)
diff --git a/lib/python2.7/site-packages/django/utils/archive.py b/lib/python2.7/site-packages/django/utils/archive.py
new file mode 100644
index 0000000..0faf1fa
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/archive.py
@@ -0,0 +1,215 @@
+"""
+Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
+
+Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+"""
+import os
+import shutil
+import tarfile
+import zipfile
+
+from django.utils import six
+
+
class ArchiveException(Exception):
    """
    Base exception class for all archive errors.
    """


class UnrecognizedArchiveFormat(ArchiveException):
    """
    Error raised when passed file is not a recognized archive format.
    """
+
+
def extract(path, to_path=''):
    """
    Unpack the tar or zip file at ``path`` into the directory ``to_path``,
    closing the archive when done.
    """
    archive = Archive(path)
    try:
        archive.extract(to_path)
    finally:
        archive.close()
+
+
class Archive(object):
    """
    The external API class that encapsulates an archive implementation.
    Usable as a context manager.
    """

    def __init__(self, file):
        self._archive = self._archive_cls(file)(file)

    @staticmethod
    def _archive_cls(file):
        """Pick the wrapper class for *file* from its (possibly double)
        extension, e.g. '.zip', '.tgz' or '.tar.gz'."""
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        # First try the last extension, then the one before it (covers
        # compound extensions such as 'foo.tar.gz').
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if not cls:
            cls = extension_map.get(os.path.splitext(base)[1])
        if not cls:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def extract(self, to_path=''):
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
+
+
class BaseArchive(object):
    """
    Base Archive class. Concrete implementations (tar, zip) inherit the
    leading-directory helpers defined here.
    """

    def split_leading_dir(self, path):
        """Split off the first path component, honouring whichever
        separator ('/' or '\\') appears first."""
        cleaned = str(path).lstrip('/').lstrip('\\')
        slash = cleaned.find('/')
        backslash = cleaned.find('\\')
        if slash != -1 and (backslash == -1 or slash < backslash):
            return cleaned.split('/', 1)
        if backslash != -1:
            return cleaned.split('\\', 1)
        return cleaned, ''

    def has_leading_dir(self, paths):
        """
        Returns true if all the paths have the same leading path name
        (i.e., everything is in one subdirectory in an archive)
        """
        common = None
        for path in paths:
            prefix, _ = self.split_leading_dir(path)
            if not prefix:
                return False
            if common is None:
                common = prefix
            elif prefix != common:
                return False
        return True

    def extract(self):
        raise NotImplementedError

    def list(self):
        raise NotImplementedError
+
+
class TarArchive(BaseArchive):
    """Archive implementation backed by the stdlib ``tarfile`` module."""

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        """Print the archive's table of contents (delegates to tarfile)."""
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """
        Extract all members below ``to_path``. If every member lives under a
        single top-level directory, that directory is stripped.
        """
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        # Bug fix: pass member *names*, not TarInfo objects -- str(TarInfo)
        # is a repr, so leading-dir detection could never succeed before.
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # Bug fix: pre-bind so the finally clause can't hit a
                # NameError when extractfile() itself raises.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
+
+
class ZipArchive(BaseArchive):
    """Archive implementation backed by the stdlib ``zipfile`` module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """Extract every entry below ``to_path``, stripping a shared
        top-level directory if there is one."""
        names = self._archive.namelist()
        strip_leading = self.has_leading_dir(names)
        for name in names:
            data = self._archive.read(name)
            if strip_leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            parent = os.path.dirname(filename)
            if parent and not os.path.exists(parent):
                os.makedirs(parent)
            if filename.endswith(('/', '\\')):
                # The entry is a directory.
                if not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                with open(filename, 'wb') as outfile:
                    outfile.write(data)

    def close(self):
        self._archive.close()
+
# Maps lowercased file extensions to the Archive implementation handling
# them. Compound extensions (e.g. '.tar.gz') are resolved by
# Archive._archive_cls via a second splitext() pass on the '.tar' part.
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
diff --git a/lib/python2.7/site-packages/django/utils/autoreload.py b/lib/python2.7/site-packages/django/utils/autoreload.py
new file mode 100644
index 0000000..6de15a2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/autoreload.py
@@ -0,0 +1,182 @@
+# Autoreloading launcher.
+# Borrowed from Peter Hunt and the CherryPy project (http://www.cherrypy.org).
+# Some taken from Ian Bicking's Paste (http://pythonpaste.org/).
+#
+# Portions copyright (c) 2004, CherryPy Team (team@cherrypy.org)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the CherryPy Team nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os, sys, time, signal, traceback
+
+try:
+ from django.utils.six.moves import _thread as thread
+except ImportError:
+ from django.utils.six.moves import _dummy_thread as thread
+
+# This import does nothing, but it's necessary to avoid some race conditions
+# in the threading module. See http://code.djangoproject.com/ticket/2330 .
+try:
+ import threading
+except ImportError:
+ pass
+
+try:
+ import termios
+except ImportError:
+ termios = None
+
+RUN_RELOADER = True
+
+_mtimes = {}
+_win = (sys.platform == "win32")
+
+_error_files = []
+
def code_changed():
    """
    Return True when any imported module's source file (or any file recorded
    in _error_files after a failed import) has changed since the last call.

    Keeps a module-level mtime cache (_mtimes); the cache is cleared when a
    change is detected so the restarted process begins with a fresh view.
    """
    global _mtimes, _win
    filenames = []
    for m in list(sys.modules.values()):
        try:
            filenames.append(m.__file__)
        except AttributeError:
            pass
    for filename in filenames + _error_files:
        if not filename:
            continue
        # Watch the source file, not the compiled artifact.
        if filename.endswith(".pyc") or filename.endswith(".pyo"):
            filename = filename[:-1]
        # Jython compiled file -> corresponding .py source.
        if filename.endswith("$py.class"):
            filename = filename[:-9] + ".py"
        if not os.path.exists(filename):
            continue # File might be in an egg, so it can't be reloaded.
        stat = os.stat(filename)
        mtime = stat.st_mtime
        if _win:
            # NOTE(review): presumably compensates for Windows timestamp
            # behaviour by folding ctime into the comparison -- confirm.
            mtime -= stat.st_ctime
        if filename not in _mtimes:
            _mtimes[filename] = mtime
            continue
        if mtime != _mtimes[filename]:
            _mtimes = {}
            try:
                # A previously-broken file changed; stop tracking it.
                del _error_files[_error_files.index(filename)]
            except ValueError:
                pass
            return True
    return False
+
def check_errors(fn):
    """
    Wrap *fn* so that when it dies with a common coding error, the file that
    triggered the error is remembered in _error_files (keeping the reloader
    watching it) before the exception propagates.
    """
    def wrapper(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except (ImportError, IndentationError, NameError, SyntaxError,
                TypeError, AttributeError):
            _, ev, tb = sys.exc_info()
            filename = getattr(ev, 'filename', None)
            if filename is None:
                # Fall back to the file named in the last traceback frame.
                filename = traceback.extract_tb(tb)[-1][0]
            if filename not in _error_files:
                _error_files.append(filename)
            raise

    return wrapper
+
def ensure_echo_on():
    """
    Re-enable terminal echo if a previously-killed child process left it off.

    No-op when termios is unavailable (e.g. Windows) or stdin is not a tty.
    """
    if termios:
        fd = sys.stdin
        if fd.isatty():
            attr_list = termios.tcgetattr(fd)
            if not attr_list[3] & termios.ECHO:
                attr_list[3] |= termios.ECHO
                if hasattr(signal, 'SIGTTOU'):
                    # Ignore SIGTTOU so a background process may change
                    # terminal settings without being stopped.
                    old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
                else:
                    old_handler = None
                termios.tcsetattr(fd, termios.TCSANOW, attr_list)
                if old_handler is not None:
                    signal.signal(signal.SIGTTOU, old_handler)
+
def reloader_thread():
    """Poll for source changes once per second; exit code 3 signals the
    parent loop in restart_with_reloader to spawn a fresh child."""
    ensure_echo_on()
    while RUN_RELOADER:
        if code_changed():
            sys.exit(3) # force reload
        time.sleep(1)
+
def restart_with_reloader():
    """
    Re-run the current script in a child process, in a loop.

    Exit code 3 (from reloader_thread) means "source changed, restart";
    any other exit code ends the loop and is returned to the caller.
    """
    while True:
        args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] + sys.argv
        if sys.platform == "win32":
            # Quote each argument for the Windows spawn command line.
            args = ['"%s"' % arg for arg in args]
        new_environ = os.environ.copy()
        # Marks the child so python_reloader runs main_func instead of
        # spawning yet another process.
        new_environ["RUN_MAIN"] = 'true'
        exit_code = os.spawnve(os.P_WAIT, sys.executable, args, new_environ)
        if exit_code != 3:
            return exit_code
+
def python_reloader(main_func, args, kwargs):
    """
    CPython entry point: the parent process loops re-spawning itself
    (restart_with_reloader); the child (RUN_MAIN=true) runs main_func in a
    background thread while its main thread watches for code changes.
    """
    if os.environ.get("RUN_MAIN") == "true":
        thread.start_new_thread(main_func, args, kwargs)
        try:
            reloader_thread()
        except KeyboardInterrupt:
            pass
    else:
        try:
            exit_code = restart_with_reloader()
            if exit_code < 0:
                # Child was killed by a signal; re-raise it against ourselves.
                os.kill(os.getpid(), -exit_code)
            else:
                sys.exit(exit_code)
        except KeyboardInterrupt:
            pass
+
def jython_reloader(main_func, args, kwargs):
    """
    Jython entry point: run main_func in a background thread and raise
    SystemRestart (handled by the Jython runtime) when the code changes.
    """
    from _systemrestart import SystemRestart
    # Bug fix: forward kwargs too -- they were previously dropped, unlike
    # in python_reloader, so keyword arguments to main() were silently lost.
    thread.start_new_thread(main_func, args, kwargs)
    while True:
        if code_changed():
            raise SystemRestart
        time.sleep(1)
+
+
def main(main_func, args=None, kwargs=None):
    """
    Run ``main_func(*args, **kwargs)`` under the platform-appropriate
    auto-reloader, wrapping it so files that raise import-time errors are
    kept under watch.
    """
    if args is None:
        args = ()
    if kwargs is None:
        kwargs = {}
    is_jython = sys.platform.startswith('java')
    reloader = jython_reloader if is_jython else python_reloader
    reloader(check_errors(main_func), args, kwargs)
+
diff --git a/lib/python2.7/site-packages/django/utils/baseconv.py b/lib/python2.7/site-packages/django/utils/baseconv.py
new file mode 100644
index 0000000..053ce3e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/baseconv.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2010 Guilherme Gondim. All rights reserved.
+# Copyright (c) 2009 Simon Willison. All rights reserved.
+# Copyright (c) 2002 Drew Perttula. All rights reserved.
+#
+# License:
+# Python Software Foundation License version 2
+#
+# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
+# ALL WARRANTIES.
+#
+# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
+# code so it may be used in proprietary projects just like prior ``baseconv``
+# distributions.
+#
+# All trademarks referenced herein are property of their respective holders.
+#
+
+"""
+Convert numbers from base 10 integers to base X strings and back again.
+
+Sample usage::
+
+ >>> base20 = BaseConverter('0123456789abcdefghij')
+ >>> base20.encode(1234)
+ '31e'
+ >>> base20.decode('31e')
+ 1234
+ >>> base20.encode(-1234)
+ '-31e'
+ >>> base20.decode('-31e')
+ -1234
+ >>> base11 = BaseConverter('0123456789-', sign='$')
+ >>> base11.encode(-1234)
+ '$-22'
+ >>> base11.decode('$-22')
+ -1234
+
+"""
+
+BASE2_ALPHABET = '01'
+BASE16_ALPHABET = '0123456789ABCDEF'
+BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
+BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
+BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+BASE64_ALPHABET = BASE62_ALPHABET + '-_'
+
class BaseConverter(object):
    """
    Convert between base-10 integers and their representation in an
    arbitrary positional alphabet (``digits``), with a configurable
    negative-sign character.
    """
    decimal_digits = '0123456789'

    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')

    def __repr__(self):
        return "<BaseConverter: base%s (%s)>" % (len(self.digits), self.digits)

    def encode(self, i):
        """Encode the integer *i* as a string in this converter's base."""
        neg, value = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + value if neg else value

    def decode(self, s):
        """Decode the string *s* back into a base-10 integer."""
        neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + value) if neg else int(value)

    def convert(self, number, from_digits, to_digits, sign):
        """
        Re-express *number* (written in *from_digits*) using *to_digits*.
        Returns a ``(negative_flag, digit_string)`` pair.
        """
        text = str(number)
        neg = 0
        if text[0] == sign:
            text = text[1:]
            neg = 1

        # Fold the source digits into a plain integer.
        value = 0
        for digit in text:
            value = value * len(from_digits) + from_digits.index(digit)

        # Emit target-base digits, most significant first.
        if value == 0:
            encoded = to_digits[0]
        else:
            chunks = []
            while value > 0:
                value, remainder = divmod(value, len(to_digits))
                chunks.append(to_digits[remainder])
            encoded = ''.join(reversed(chunks))
        return neg, encoded
+
# Ready-made converters for the common alphabets defined above. base64 uses
# '$' as its sign character because '-' is part of its digit alphabet.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
diff --git a/lib/python2.7/site-packages/django/utils/cache.py b/lib/python2.7/site-packages/django/utils/cache.py
new file mode 100644
index 0000000..1c8587d
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/cache.py
@@ -0,0 +1,264 @@
+"""
+This module contains helper functions for controlling caching. It does so by
+managing the "Vary" header of responses. It includes functions to patch the
+header of response objects directly and decorators that change functions to do
+that header-patching themselves.
+
+For information on the Vary header, see:
+
+ http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
+
+Essentially, the "Vary" HTTP header defines which headers a cache should take
+into account when building its cache key. Requests with the same path but
+different header content for headers named in "Vary" need to get different
+cache keys to prevent delivery of wrong content.
+
+An example: i18n middleware would need to distinguish caches by the
+"Accept-language" header.
+"""
+from __future__ import unicode_literals
+
+import hashlib
+import re
+import time
+
+from django.conf import settings
+from django.core.cache import get_cache
+from django.utils.encoding import iri_to_uri, force_bytes, force_text
+from django.utils.http import http_date
+from django.utils.timezone import get_current_timezone_name
+from django.utils.translation import get_language
+
+cc_delim_re = re.compile(r'\s*,\s*')
+
def patch_cache_control(response, **kwargs):
    """
    This function patches the Cache-Control header by adding all
    keyword arguments to it. The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def dictitem(s):
        # Parse one "directive" or "directive=value" token into a pair.
        t = s.split('=', 1)
        if len(t) > 1:
            return (t[0].lower(), t[1])
        else:
            return (t[0].lower(), True)

    def dictvalue(t):
        # Serialize a (directive, value) pair back into header syntax.
        if t[1] is True:
            return t[0]
        else:
            return '%s=%s' % (t[0], t[1])

    if response.has_header('Cache-Control'):
        cc = cc_delim_re.split(response['Cache-Control'])
        cc = dict([dictitem(el) for el in cc])
    else:
        cc = {}

    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in cc and 'max_age' in kwargs:
        kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])

    # Allow overriding private caching and vice versa
    if 'private' in cc and 'public' in kwargs:
        del cc['private']
    elif 'public' in cc and 'private' in kwargs:
        del cc['public']

    for (k, v) in kwargs.items():
        cc[k.replace('_', '-')] = v
    cc = ', '.join([dictvalue(el) for el in cc.items()])
    response['Cache-Control'] = cc
+
def get_max_age(response):
    """
    Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
    """
    if not response.has_header('Cache-Control'):
        return
    directives = dict(_to_tuple(el)
                      for el in cc_delim_re.split(response['Cache-Control']))
    try:
        return int(directives['max-age'])
    except (KeyError, ValueError, TypeError):
        # Missing or malformed directive -> None.
        return None
+
def _set_response_etag(response):
    """Set the ETag header to the MD5 hex digest of the response body
    (skipped for streaming responses, whose content isn't available) and
    return the response."""
    if not response.streaming:
        digest = hashlib.md5(response.content).hexdigest()
        response['ETag'] = '"%s"' % digest
    return response
+
def patch_response_headers(response, cache_timeout=None):
    """
    Adds some useful headers to the given HttpResponse object:
    ETag, Last-Modified, Expires and Cache-Control

    Each header is only added if it isn't already set.

    cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
    by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    if cache_timeout < 0:
        cache_timeout = 0 # Can't have max-age negative
    if settings.USE_ETAGS and not response.has_header('ETag'):
        if hasattr(response, 'render') and callable(response.render):
            # Template responses compute content lazily; hash after render.
            response.add_post_render_callback(_set_response_etag)
        else:
            response = _set_response_etag(response)
    if not response.has_header('Last-Modified'):
        response['Last-Modified'] = http_date()
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
+
def add_never_cache_headers(response):
    """
    Adds headers to a response to indicate that a page should never be cached.
    """
    # A negative timeout is clamped to max-age=0 / an already-past Expires
    # by patch_response_headers.
    patch_response_headers(response, cache_timeout=-1)
+
def patch_vary_headers(response, newheaders):
    """
    Adds (or updates) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". Existing
    headers in "Vary" aren't removed.
    """
    # Existing order is preserved deliberately: cache implementations may
    # hash the Vary value, so reordering would change cache keys.
    if response.has_header('Vary'):
        vary_headers = cc_delim_re.split(response['Vary'])
    else:
        vary_headers = []
    # Header names are compared case-insensitively; original casing is kept.
    existing = set(header.lower() for header in vary_headers)
    to_add = [header for header in newheaders
              if header.lower() not in existing]
    response['Vary'] = ', '.join(vary_headers + to_add)
+
def has_vary_header(response, header_query):
    """
    Checks to see if the response has a given header name in its Vary header.
    Comparison is case-insensitive.
    """
    if not response.has_header('Vary'):
        return False
    lowered = (header.lower() for header in cc_delim_re.split(response['Vary']))
    return header_query.lower() in lowered
+
def _i18n_cache_key_suffix(request, cache_key):
    """If necessary, adds the current locale or time zone to the cache key."""
    if settings.USE_I18N or settings.USE_L10N:
        # first check if LocaleMiddleware or another middleware added
        # LANGUAGE_CODE to request, then fall back to the active language
        # which in turn can also fall back to settings.LANGUAGE_CODE
        cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
    if settings.USE_TZ:
        # The datetime module doesn't restrict the output of tzname().
        # Windows is known to use non-standard, locale-dependent names.
        # User-defined tzinfo classes may return absolutely anything.
        # Hence this paranoid conversion to create a valid cache key.
        tz_name = force_text(get_current_timezone_name(), errors='ignore')
        cache_key += '.%s' % tz_name.encode('ascii', 'ignore').decode('ascii').replace(' ', '_')
    return cache_key
+
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Returns a cache key from the headers given in the header list."""
    # Hash the request-header values the cached page varies on.
    header_hash = hashlib.md5()
    for header in headerlist:
        value = request.META.get(header, None)
        if value is not None:
            header_hash.update(force_bytes(value))
    url_hash = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, url_hash.hexdigest(), header_hash.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
+
def _generate_cache_header_key(key_prefix, request):
    """Returns a cache key for the header cache."""
    url_hash = hashlib.md5(force_bytes(iri_to_uri(request.get_full_path())))
    cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url_hash.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
+
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Returns a cache key based on the request path and query. It can be used
    in the request phase because it pulls the list of headers to take into
    account from the global path registry and uses those to build a cache key
    to check against.

    If there is no headerlist stored, the page needs to be rebuilt, so this
    function returns None.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
    # The stored headerlist records which request headers the cached page
    # varies on; it is written by learn_cache_key in the response phase.
    headerlist = cache.get(cache_key, None)
    if headerlist is not None:
        return _generate_cache_key(request, method, headerlist, key_prefix)
    else:
        return None
+
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learns what headers to take into account for some request path from the
    response object. It stores those headers in a global path registry so that
    later access to that path will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.

    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
    if response.has_header('Vary'):
        is_accept_language_redundant = settings.USE_I18N or settings.USE_L10N
        # If i18n or l10n are used, the generated cache key will be suffixed
        # with the current locale. Adding the raw value of Accept-Language is
        # redundant in that case and would result in storing the same content
        # under multiple keys in the cache. See #18191 for details.
        headerlist = []
        for header in cc_delim_re.split(response['Vary']):
            # Convert header names to the WSGI environ form used in
            # request.META (e.g. 'Accept-Encoding' -> 'HTTP_ACCEPT_ENCODING').
            header = header.upper().replace('-', '_')
            if header == 'ACCEPT_LANGUAGE' and is_accept_language_redundant:
                continue
            headerlist.append('HTTP_' + header)
        headerlist.sort()
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.get_full_path()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
+
+
def _to_tuple(s):
    """Split a "key=value" Cache-Control token into a (key, value) pair;
    bare tokens map to (key, True). Keys are lowercased."""
    key, sep, value = s.partition('=')
    return (key.lower(), value) if sep else (key.lower(), True)
diff --git a/lib/python2.7/site-packages/django/utils/checksums.py b/lib/python2.7/site-packages/django/utils/checksums.py
new file mode 100644
index 0000000..8617e22
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/checksums.py
@@ -0,0 +1,24 @@
+"""
+Common checksum routines.
+"""
+
+__all__ = ['luhn',]
+
+from django.utils import six
+
+LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9) # sum_of_digits(index * 2)
+
def luhn(candidate):
    """
    Checks a candidate number for validity according to the Luhn
    algorithm (used in validation of, for example, credit cards).
    Both numeric and string candidates are accepted.
    """
    if not isinstance(candidate, six.string_types):
        candidate = str(candidate)
    try:
        digits = [int(ch) for ch in reversed(candidate)]
    except ValueError:  # Raised if an int conversion fails
        return False
    # Rightmost digit is index 0 after the reversal; every second digit
    # (the "odd" positions) is doubled via the precomputed lookup table.
    evens = sum(digits[::2])
    odds = sum(LUHN_ODD_LOOKUP[d] for d in digits[1::2])
    return (evens + odds) % 10 == 0
diff --git a/lib/python2.7/site-packages/django/utils/crypto.py b/lib/python2.7/site-packages/django/utils/crypto.py
new file mode 100644
index 0000000..6463709
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/crypto.py
@@ -0,0 +1,166 @@
+"""
+Django's standard crypto functions and utilities.
+"""
+from __future__ import unicode_literals
+
+import hmac
+import struct
+import hashlib
+import binascii
+import operator
+import time
+from functools import reduce
+
+# Use the system PRNG if possible
+import random
+try:
+ random = random.SystemRandom()
+ using_sysrandom = True
+except NotImplementedError:
+ import warnings
+ warnings.warn('A secure pseudo-random number generator is not available '
+ 'on your system. Falling back to Mersenne Twister.')
+ using_sysrandom = False
+
+from django.conf import settings
+from django.utils.encoding import force_bytes
+from django.utils import six
+from django.utils.six.moves import xrange
+
+
def salted_hmac(key_salt, value, secret=None):
    """
    Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a
    secret (which defaults to settings.SECRET_KEY).

    A different key_salt should be passed in for every application of HMAC.
    """
    if secret is None:
        secret = settings.SECRET_KEY
    # Derive a fixed-size key by hashing key_salt + secret through SHA1.
    # hmac would hash over-long keys itself, but doing it here guarantees
    # the derivation is *always* applied, whatever the combined length.
    derived_key = hashlib.sha1((key_salt + secret).encode('utf-8')).digest()
    return hmac.new(derived_key, msg=force_bytes(value), digestmod=hashlib.sha1)
+
+
def get_random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Returns a securely generated random string.

    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    if not using_sysrandom:
        # No os.urandom on this system, so the Mersenne Twister is in use.
        # Re-seed it from data an attacker cannot easily predict (current
        # generator state + wall clock + SECRET_KEY) before every draw.
        # This is a hack, but it beats fully predictable output.
        seed_material = "%s%s%s" % (random.getstate(), time.time(),
                                    settings.SECRET_KEY)
        random.seed(hashlib.sha256(seed_material.encode('utf-8')).digest())
    return ''.join(random.choice(allowed_chars) for _ in range(length))
+
+
def constant_time_compare(val1, val2):
    """
    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.

    For the sake of simplicity, this function executes in constant time only
    when the two strings have the same length. It short-circuits when they
    have different lengths. Since Django only uses it to compare hashes of
    known expected length, this is acceptable.
    """
    if len(val1) != len(val2):
        return False
    result = 0
    # Accumulate the XOR of every character pair; any mismatch leaves a
    # non-zero bit in `result`. There is deliberately no early exit, so the
    # timing does not reveal the position of the first difference.
    if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
        # On Python 3, iterating over bytes yields ints directly.
        for x, y in zip(val1, val2):
            result |= x ^ y
    else:
        # Text strings (or Python 2 byte strings) yield characters.
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
    return result == 0
+
+
+def _bin_to_long(x):
+ """
+ Convert a binary string into a long integer
+
+ This is a clever optimization for fast xor vector math
+ """
+ return int(binascii.hexlify(x), 16)
+
+
+def _long_to_bin(x, hex_format_string):
+ """
+ Convert a long integer into a binary string.
+ hex_format_string is like "%020x" for padding 10 characters.
+ """
+ return binascii.unhexlify((hex_format_string % x).encode('ascii'))
+
+
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
    """
    Implements PBKDF2 as defined in RFC 2898, section 5.2

    HMAC+SHA256 is used as the default pseudo random function.

    As of 2011, 10,000 iterations was the recommended default which
    took 100ms on a 2.2Ghz Core 2 Duo. This is probably the bare
    minimum for security given 1000 iterations was recommended in
    2001. This code is very well optimized for CPython and is only
    four times slower than openssl's implementation. Look in
    django.contrib.auth.hashers for the present default.
    """
    assert iterations > 0
    if not digest:
        digest = hashlib.sha256
    password = force_bytes(password)
    salt = force_bytes(salt)
    hlen = digest().digest_size
    if not dklen:
        dklen = hlen
    if dklen > (2 ** 32 - 1) * hlen:
        # RFC 2898 step 1: derived key length is capped at (2^32 - 1) blocks.
        raise OverflowError('dklen too big')
    # l = number of hlen-sized blocks (ceiling division);
    # r = number of bytes kept from the final block.
    l = -(-dklen // hlen)
    r = dklen - (l - 1) * hlen

    hex_format_string = "%%0%ix" % (hlen * 2)

    # Build the HMAC inner/outer pad states once and reuse .copy() of them on
    # every iteration -- this inlining of hmac.new() is the main CPython
    # optimization mentioned in the docstring.
    inner, outer = digest(), digest()
    if len(password) > inner.block_size:
        # Keys longer than the block size are first hashed, per HMAC (RFC 2104).
        password = digest(password).digest()
    password += b'\x00' * (inner.block_size - len(password))
    inner.update(password.translate(hmac.trans_36))
    outer.update(password.translate(hmac.trans_5C))

    def F(i):
        # T_i = U_1 xor U_2 xor ... xor U_c (RFC 2898), with the XOR folding
        # done on integers via _bin_to_long/_long_to_bin.
        def U():
            u = salt + struct.pack(b'>I', i)
            for j in xrange(int(iterations)):
                dig1, dig2 = inner.copy(), outer.copy()
                dig1.update(u)
                dig2.update(dig1.digest())
                u = dig2.digest()
                yield _bin_to_long(u)
        return _long_to_bin(reduce(operator.xor, U()), hex_format_string)

    T = [F(x) for x in range(1, l + 1)]
    return b''.join(T[:-1]) + T[-1][:r]
diff --git a/lib/python2.7/site-packages/django/utils/daemonize.py b/lib/python2.7/site-packages/django/utils/daemonize.py
new file mode 100644
index 0000000..763a9db
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/daemonize.py
@@ -0,0 +1,58 @@
+import os
+import sys
+
if os.name == 'posix':
    def become_daemon(our_home_dir='.', out_log='/dev/null',
                      err_log='/dev/null', umask=0o022):
        "Robustly turn into a UNIX daemon, running in our_home_dir."
        # First fork: detach from the parent so this process is not a
        # process-group leader, a precondition for os.setsid() below.
        try:
            if os.fork() > 0:
                sys.exit(0) # kill off parent
        except OSError as e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
            sys.exit(1)
        os.setsid()
        os.chdir(our_home_dir)
        os.umask(umask)

        # Second fork: give up session leadership so the daemon can never
        # reacquire a controlling terminal. os._exit skips atexit handlers,
        # which must only run once in the surviving process.
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
            os._exit(1)

        # Redirect the standard streams; buffering=0 is Python 2 semantics
        # for unbuffered file objects.
        si = open('/dev/null', 'r')
        so = open(out_log, 'a+', 0)
        se = open(err_log, 'a+', 0)
        # Duplicate onto fds 0/1/2 so code writing to the raw descriptors
        # (e.g. C extensions) is redirected too.
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())
        # Set custom file descriptors so that they get proper buffering.
        sys.stdout, sys.stderr = so, se
else:
    def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=0o022):
        """
        If we're not running under a POSIX system, just simulate the daemon
        mode by doing redirections and directory changing.
        """
        os.chdir(our_home_dir)
        os.umask(umask)
        sys.stdin.close()
        sys.stdout.close()
        sys.stderr.close()
        if err_log:
            sys.stderr = open(err_log, 'a', 0)
        else:
            sys.stderr = NullDevice()
        if out_log:
            sys.stdout = open(out_log, 'a', 0)
        else:
            sys.stdout = NullDevice()

    class NullDevice:
        "A writeable object that writes to nowhere -- like /dev/null."
        def write(self, s):
            pass
diff --git a/lib/python2.7/site-packages/django/utils/datastructures.py b/lib/python2.7/site-packages/django/utils/datastructures.py
new file mode 100644
index 0000000..f854933
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/datastructures.py
@@ -0,0 +1,524 @@
+import copy
+import warnings
+from django.utils import six
+
+
class MergeDict(object):
    """
    A simple class for creating new "virtual" dictionaries that actually look
    up values in more than one dictionary, passed in the constructor.

    If a key appears in more than one of the given dictionaries, only the
    first occurrence will be used.
    """
    def __init__(self, *dicts):
        self.dicts = dicts

    def __bool__(self):
        # Truthy if any of the underlying dicts is non-empty.
        return any(self.dicts)

    def __nonzero__(self):
        # Python 2 spelling of the truth protocol; delegate to __bool__.
        return type(self).__bool__(self)

    def __getitem__(self, key):
        # First dict containing the key wins.
        for dict_ in self.dicts:
            try:
                return dict_[key]
            except KeyError:
                pass
        raise KeyError(key)

    def __copy__(self):
        # Shallow: the underlying dicts are shared with the copy, not cloned.
        return self.__class__(*self.dicts)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    # This is used by MergeDicts of MultiValueDicts.
    def getlist(self, key):
        for dict_ in self.dicts:
            if key in dict_:
                return dict_.getlist(key)
        return []

    def _iteritems(self):
        # Yield each (key, value) at most once, honoring the
        # first-dict-wins precedence documented on the class.
        seen = set()
        for dict_ in self.dicts:
            for item in six.iteritems(dict_):
                k = item[0]
                if k in seen:
                    continue
                seen.add(k)
                yield item

    def _iterkeys(self):
        for k, v in self._iteritems():
            yield k

    def _itervalues(self):
        for k, v in self._iteritems():
            yield v

    if six.PY3:
        # On Python 3, items()/keys()/values() return iterators.
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        # Python 2 only: list-returning counterparts of the iterators above.
        def items(self):
            return list(self.iteritems())

        def keys(self):
            return list(self.iterkeys())

        def values(self):
            return list(self.itervalues())

    def has_key(self, key):
        for dict_ in self.dicts:
            if key in dict_:
                return True
        return False

    __contains__ = has_key

    __iter__ = _iterkeys

    def copy(self):
        """Returns a copy of this object."""
        return self.__copy__()

    def __str__(self):
        '''
        Returns something like

            "{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"

        instead of the generic "<object meta-data>" inherited from object.
        '''
        return str(dict(self.items()))

    def __repr__(self):
        '''
        Returns something like

            MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})

        instead of generic "<object meta-data>" inherited from object.
        '''
        dictreprs = ', '.join(repr(d) for d in self.dicts)
        return '%s(%s)' % (self.__class__.__name__, dictreprs)
+
class SortedDict(dict):
    """
    A dictionary that keeps its keys in the order in which they're inserted.
    """
    def __new__(cls, *args, **kwargs):
        instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
        # keyOrder tracks insertion order; initialized here so it exists even
        # when __init__ is bypassed (e.g. during copy.deepcopy/unpickling).
        instance.keyOrder = []
        return instance

    def __init__(self, data=None):
        if data is None or isinstance(data, dict):
            data = data or []
            super(SortedDict, self).__init__(data)
            self.keyOrder = list(data) if data else []
        else:
            # data is assumed to be a sequence of (key, value) pairs.
            super(SortedDict, self).__init__()
            super_set = super(SortedDict, self).__setitem__
            for key, value in data:
                # Take the ordering from first key
                if key not in self:
                    self.keyOrder.append(key)
                # But override with last value in data (dict() does this)
                super_set(key, value)

    def __deepcopy__(self, memo):
        return self.__class__([(key, copy.deepcopy(value, memo))
                               for key, value in self.items()])

    def __copy__(self):
        # The Python's default copy implementation will alter the state
        # of self. The reason for this seems complex but is likely related to
        # subclassing dict.
        return self.copy()

    def __setitem__(self, key, value):
        if key not in self:
            self.keyOrder.append(key)
        super(SortedDict, self).__setitem__(key, value)

    def __delitem__(self, key):
        super(SortedDict, self).__delitem__(key)
        self.keyOrder.remove(key)

    def __iter__(self):
        return iter(self.keyOrder)

    def __reversed__(self):
        return reversed(self.keyOrder)

    def pop(self, k, *args):
        result = super(SortedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result

    def popitem(self):
        result = super(SortedDict, self).popitem()
        # dict.popitem() picks an arbitrary key; drop it from keyOrder too.
        self.keyOrder.remove(result[0])
        return result

    def _iteritems(self):
        for key in self.keyOrder:
            yield key, self[key]

    def _iterkeys(self):
        for key in self.keyOrder:
            yield key

    def _itervalues(self):
        for key in self.keyOrder:
            yield self[key]

    if six.PY3:
        # On Python 3, items()/keys()/values() return iterators.
        items = _iteritems
        keys = _iterkeys
        values = _itervalues
    else:
        iteritems = _iteritems
        iterkeys = _iterkeys
        itervalues = _itervalues

        # Python 2 only: list-returning counterparts of the iterators above.
        def items(self):
            return [(k, self[k]) for k in self.keyOrder]

        def keys(self):
            return self.keyOrder[:]

        def values(self):
            return [self[k] for k in self.keyOrder]

    def update(self, dict_):
        for k, v in six.iteritems(dict_):
            self[k] = v

    def setdefault(self, key, default):
        if key not in self:
            self.keyOrder.append(key)
        return super(SortedDict, self).setdefault(key, default)

    def value_for_index(self, index):
        """Returns the value of the item at the given zero-based index."""
        # This, and insert() are deprecated because they cannot be implemented
        # using collections.OrderedDict (Python 2.7 and up), which we'll
        # eventually switch to
        warnings.warn(
            "SortedDict.value_for_index is deprecated", DeprecationWarning,
            stacklevel=2
        )
        return self[self.keyOrder[index]]

    def insert(self, index, key, value):
        """Inserts the key, value pair before the item with the given index."""
        warnings.warn(
            "SortedDict.insert is deprecated", DeprecationWarning,
            stacklevel=2
        )
        if key in self.keyOrder:
            # Re-inserting an existing key moves it; adjust the target index
            # if the removal shifted positions to its left.
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                index -= 1
        self.keyOrder.insert(index, key)
        super(SortedDict, self).__setitem__(key, value)

    def copy(self):
        """Returns a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        return self.__class__(self)

    def __repr__(self):
        """
        Replaces the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in six.iteritems(self)])

    def clear(self):
        super(SortedDict, self).clear()
        self.keyOrder = []
+
class MultiValueDictKeyError(KeyError):
    """
    Raised by MultiValueDict.__getitem__ for a missing key; subclasses
    KeyError so existing ``except KeyError`` handlers keep working.
    """
    pass
+
class MultiValueDict(dict):
    """
    A subclass of dictionary customized to handle multiple values for the
    same key.

    >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    >>> d['name']
    'Simon'
    >>> d.getlist('name')
    ['Adrian', 'Simon']
    >>> d.getlist('doesnotexist')
    []
    >>> d.getlist('doesnotexist', ['Adrian', 'Simon'])
    ['Adrian', 'Simon']
    >>> d.get('lastname', 'nonexistent')
    'nonexistent'
    >>> d.setlist('lastname', ['Holovaty', 'Willison'])

    This class exists to solve the irritating problem raised by cgi.parse_qs,
    which returns a list for every key, even though most Web forms submit
    single name-value pairs.
    """
    def __init__(self, key_to_list_mapping=()):
        super(MultiValueDict, self).__init__(key_to_list_mapping)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__,
                             super(MultiValueDict, self).__repr__())

    def __getitem__(self, key):
        """
        Returns the last data value for this key, or [] if it's an empty list;
        raises KeyError if not found.
        """
        try:
            list_ = super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            raise MultiValueDictKeyError(repr(key))
        try:
            return list_[-1]
        except IndexError:
            return []

    def __setitem__(self, key, value):
        # A plain assignment stores a single value as a one-element list.
        super(MultiValueDict, self).__setitem__(key, [value])

    def __copy__(self):
        # The per-key lists are copied (v[:]); their items are shared.
        return self.__class__([
            (k, v[:])
            for k, v in self.lists()
        ])

    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result

    def __getstate__(self):
        # Pickle the full value lists under '_data'; going through plain dict
        # state would only capture what __getitem__ exposes (last values).
        obj_dict = self.__dict__.copy()
        obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
        return obj_dict

    def __setstate__(self, obj_dict):
        data = obj_dict.pop('_data', {})
        for k, v in data.items():
            self.setlist(k, v)
        self.__dict__.update(obj_dict)

    def get(self, key, default=None):
        """
        Returns the last data value for the passed key. If key doesn't exist
        or value is an empty list, then default is returned.
        """
        try:
            val = self[key]
        except KeyError:
            return default
        if val == []:
            return default
        return val

    def getlist(self, key, default=None):
        """
        Returns the list of values for the passed key. If key doesn't exist,
        then a default value is returned.
        """
        try:
            return super(MultiValueDict, self).__getitem__(key)
        except KeyError:
            if default is None:
                return []
            return default

    def setlist(self, key, list_):
        super(MultiValueDict, self).__setitem__(key, list_)

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
            # Do not return default here because __setitem__() may store
            # another value -- QueryDict.__setitem__() does. Look it up.
        return self[key]

    def setlistdefault(self, key, default_list=None):
        if key not in self:
            if default_list is None:
                default_list = []
            self.setlist(key, default_list)
            # Do not return default_list here because setlist() may store
            # another value -- QueryDict.setlist() does. Look it up.
        return self.getlist(key)

    def appendlist(self, key, value):
        """Appends an item to the internal list associated with key."""
        self.setlistdefault(key).append(value)

    def _iteritems(self):
        """
        Yields (key, value) pairs, where value is the last item in the list
        associated with the key.
        """
        for key in self:
            yield key, self[key]

    def _iterlists(self):
        """Yields (key, list) pairs."""
        return six.iteritems(super(MultiValueDict, self))

    def _itervalues(self):
        """Yield the last value on every key list."""
        for key in self:
            yield self[key]

    if six.PY3:
        # On Python 3, items()/lists()/values() return iterators.
        items = _iteritems
        lists = _iterlists
        values = _itervalues
    else:
        iteritems = _iteritems
        iterlists = _iterlists
        itervalues = _itervalues

        # Python 2 only: list-returning counterparts of the iterators above.
        def items(self):
            return list(self.iteritems())

        def lists(self):
            return list(self.iterlists())

        def values(self):
            return list(self.itervalues())

    def copy(self):
        """Returns a shallow copy of this object."""
        return copy.copy(self)

    def update(self, *args, **kwargs):
        """
        update() extends rather than replaces existing key lists.
        Also accepts keyword args.
        """
        if len(args) > 1:
            raise TypeError("update expected at most 1 arguments, got %d" % len(args))
        if args:
            other_dict = args[0]
            if isinstance(other_dict, MultiValueDict):
                for key, value_list in other_dict.lists():
                    self.setlistdefault(key).extend(value_list)
            else:
                try:
                    for key, value in other_dict.items():
                        self.setlistdefault(key).append(value)
                except TypeError:
                    raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
        for key, value in six.iteritems(kwargs):
            self.setlistdefault(key).append(value)

    def dict(self):
        """
        Returns current object as a dict with singular values.
        """
        return dict((key, self[key]) for key in self)
+
+
class ImmutableList(tuple):
    """
    A tuple-like object that raises useful errors when it is asked to mutate.

    Example::

        >>> a = ImmutableList(range(5), warning="You cannot mutate this.")
        >>> a[3] = '4'
        Traceback (most recent call last):
            ...
        AttributeError: You cannot mutate this.
    """

    def __new__(cls, *args, **kwargs):
        # The 'warning' keyword is consumed here; everything else is passed
        # straight to the tuple constructor.
        warning = kwargs.pop('warning', 'ImmutableList object is immutable.')
        instance = tuple.__new__(cls, *args, **kwargs)
        instance.warning = warning
        return instance

    def complain(self, *wargs, **kwargs):
        # The stored warning may itself be an exception instance to raise.
        if isinstance(self.warning, Exception):
            raise self.warning
        raise AttributeError(self.warning)

    # Every mutating method of list is routed to complain().
    __delitem__ = complain
    __delslice__ = complain
    __iadd__ = complain
    __imul__ = complain
    __setitem__ = complain
    __setslice__ = complain
    append = complain
    extend = complain
    insert = complain
    pop = complain
    remove = complain
    sort = complain
    reverse = complain
+
class DictWrapper(dict):
    """
    Wraps accesses to a dictionary so that certain values (those starting with
    the specified prefix) are passed through a function before being returned.
    The prefix is removed before looking up the real value.

    Used by the SQL construction code to ensure that values are correctly
    quoted before being used.
    """
    def __init__(self, data, func, prefix):
        super(DictWrapper, self).__init__(data)
        self.func = func
        self.prefix = prefix

    def __getitem__(self, key):
        """
        Retrieves the real value after stripping the prefix string (if
        present). If the prefix is present, pass the value through self.func
        before returning, otherwise return the raw value.
        """
        use_func = key.startswith(self.prefix)
        if use_func:
            key = key[len(self.prefix):]
        value = super(DictWrapper, self).__getitem__(key)
        return self.func(value) if use_func else value
diff --git a/lib/python2.7/site-packages/django/utils/dateformat.py b/lib/python2.7/site-packages/django/utils/dateformat.py
new file mode 100644
index 0000000..6d0a7b6
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/dateformat.py
@@ -0,0 +1,317 @@
+"""
+PHP date() style date formatting
+See http://www.php.net/date for format strings
+
+Usage:
+>>> import datetime
+>>> d = datetime.datetime.now()
+>>> df = DateFormat(d)
+>>> print(df.format('jS F Y H:i'))
+7th October 2003 11:39
+>>>
+"""
+from __future__ import unicode_literals
+
+import re
+import time
+import calendar
+import datetime
+
+from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
+from django.utils.tzinfo import LocalTimezone
+from django.utils.translation import ugettext as _
+from django.utils.encoding import force_text
+from django.utils import six
+from django.utils.timezone import is_aware, is_naive
+
+re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
+re_escaped = re.compile(r'\\(.)')
+
class Formatter(object):
    # Base class for TimeFormat/DateFormat: each specifier character in a
    # PHP date()-style format string maps to the same-named method on self.
    def format(self, formatstr):
        """Expand formatstr, calling self.<char>() for every specifier."""
        pieces = []
        # re_formatchars.split alternates literal text (even indices) with
        # the captured specifier characters (odd indices).
        for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
            if i % 2:
                pieces.append(force_text(getattr(self, piece)()))
            elif piece:
                # Remove the escaping backslashes from literal text.
                pieces.append(re_escaped.sub(r'\1', piece))
        return ''.join(pieces)
+
class TimeFormat(Formatter):
    """Implements the time-of-day specifiers of PHP date() for a time/datetime."""

    def __init__(self, t):
        self.data = t

    def a(self):
        "'a.m.' or 'p.m.'"
        return _('p.m.') if self.data.hour > 11 else _('a.m.')

    def A(self):
        "'AM' or 'PM'"
        return _('PM') if self.data.hour > 11 else _('AM')

    def B(self):
        "Swatch Internet time"
        raise NotImplementedError

    def f(self):
        """
        Time, in 12-hour hours and minutes, with minutes left off if they're
        zero.
        Examples: '1', '1:30', '2:05', '2'
        Proprietary extension.
        """
        hour = self.g()
        # On the exact hour only the (integer) hour is returned.
        return hour if self.data.minute == 0 else '%s:%s' % (hour, self.i())

    def g(self):
        "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
        hour = self.data.hour
        if hour == 0:
            return 12
        return hour - 12 if hour > 12 else hour

    def G(self):
        "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
        return self.data.hour

    def h(self):
        "Hour, 12-hour format; i.e. '01' to '12'"
        return '%02d' % self.g()

    def H(self):
        "Hour, 24-hour format; i.e. '00' to '23'"
        return '%02d' % self.G()

    def i(self):
        "Minutes; i.e. '00' to '59'"
        return '%02d' % self.data.minute

    def P(self):
        """
        Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
        if they're zero and the strings 'midnight' and 'noon' if appropriate.
        Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
        Proprietary extension.
        """
        if self.data.minute == 0:
            if self.data.hour == 0:
                return _('midnight')
            if self.data.hour == 12:
                return _('noon')
        return '%s %s' % (self.f(), self.a())

    def s(self):
        "Seconds; i.e. '00' to '59'"
        return '%02d' % self.data.second

    def u(self):
        "Microseconds; i.e. '000000' to '999999'"
        return '%06d' % self.data.microsecond
+
+
class DateFormat(TimeFormat):
    """Implements the date-related specifiers of PHP date() for a date/datetime."""

    # Cumulative day counts at the start of each month (index = month number,
    # non-leap years); used by z() for day-of-year arithmetic.
    year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]

    def __init__(self, dt):
        # Accepts either a datetime or date object.
        self.data = dt
        self.timezone = None
        if isinstance(dt, datetime.datetime):
            if is_naive(dt):
                self.timezone = LocalTimezone(dt)
            else:
                self.timezone = dt.tzinfo

    def b(self):
        "Month, textual, 3 letters, lowercase; e.g. 'jan'"
        return MONTHS_3[self.data.month]

    def c(self):
        """
        ISO 8601 Format
        Example : '2008-01-02T10:30:00.000123'
        """
        return self.data.isoformat()

    def d(self):
        "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
        return '%02d' % self.data.day

    def D(self):
        "Day of the week, textual, 3 letters; e.g. 'Fri'"
        return WEEKDAYS_ABBR[self.data.weekday()]

    def e(self):
        "Timezone name if available"
        try:
            if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
                # Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
                return self.data.tzinfo.tzname(self.data) or ""
        except NotImplementedError:
            pass
        return ""

    def E(self):
        "Alternative month names as required by some locales. Proprietary extension."
        return MONTHS_ALT[self.data.month]

    def F(self):
        "Month, textual, long; e.g. 'January'"
        return MONTHS[self.data.month]

    def I(self):
        "'1' if Daylight Savings Time, '0' otherwise."
        if self.timezone and self.timezone.dst(self.data):
            return '1'
        else:
            return '0'

    def j(self):
        "Day of the month without leading zeros; i.e. '1' to '31'"
        return self.data.day

    def l(self):
        "Day of the week, textual, long; e.g. 'Friday'"
        return WEEKDAYS[self.data.weekday()]

    def L(self):
        "Boolean for whether it is a leap year; i.e. True or False"
        return calendar.isleap(self.data.year)

    def m(self):
        "Month; i.e. '01' to '12'"
        return '%02d' % self.data.month

    def M(self):
        "Month, textual, 3 letters; e.g. 'Jan'"
        return MONTHS_3[self.data.month].title()

    def n(self):
        "Month without leading zeros; i.e. '1' to '12'"
        return self.data.month

    def N(self):
        "Month abbreviation in Associated Press style. Proprietary extension."
        return MONTHS_AP[self.data.month]

    def o(self):
        "ISO 8601 year number matching the ISO week number (W)"
        return self.data.isocalendar()[0]

    def O(self):
        "Difference to Greenwich time in hours; e.g. '+0200', '-0430'"
        seconds = self.Z()
        sign = '-' if seconds < 0 else '+'
        seconds = abs(seconds)
        return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)

    def r(self):
        "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
        return self.format('D, j M Y H:i:s O')

    def S(self):
        "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
        if self.data.day in (11, 12, 13): # Special case
            return 'th'
        last = self.data.day % 10
        if last == 1:
            return 'st'
        if last == 2:
            return 'nd'
        if last == 3:
            return 'rd'
        return 'th'

    def t(self):
        "Number of days in the given month; i.e. '28' to '31'"
        return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]

    def T(self):
        "Time zone of this machine; e.g. 'EST' or 'MDT'"
        name = self.timezone.tzname(self.data) if self.timezone else None
        if name is None:
            name = self.format('O')
        return six.text_type(name)

    def U(self):
        "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
        if isinstance(self.data, datetime.datetime) and is_aware(self.data):
            return int(calendar.timegm(self.data.utctimetuple()))
        else:
            return int(time.mktime(self.data.timetuple()))

    def w(self):
        "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
        return (self.data.weekday() + 1) % 7

    def W(self):
        "ISO-8601 week number of year, weeks starting on Monday"
        # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
        week_number = None
        jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
        weekday = self.data.weekday() + 1
        day_of_year = self.z()
        if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
            # Date falls in the last ISO week of the previous year.
            if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
                week_number = 53
            else:
                week_number = 52
        else:
            if calendar.isleap(self.data.year):
                i = 366
            else:
                i = 365
            if (i - day_of_year) < (4 - weekday):
                # Date falls in the first ISO week of the next year.
                week_number = 1
            else:
                j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
                week_number = j // 7
                if jan1_weekday > 4:
                    week_number -= 1
        return week_number

    def y(self):
        "Year, 2 digits; e.g. '99'"
        # BUG FIX: the previous six.text_type(self.data.year)[2:] broke for
        # years with fewer than four digits (year 99 -> '', year 500 -> '0').
        # Modulo arithmetic gives the same output for all four-digit years
        # and a correct, zero-padded result otherwise.
        return '%02d' % (self.data.year % 100)

    def Y(self):
        "Year, 4 digits; e.g. '1999'"
        return self.data.year

    def z(self):
        "Day of the year; i.e. '0' to '365'"
        doy = self.year_days[self.data.month] + self.data.day
        if self.L() and self.data.month > 2:
            doy += 1
        return doy

    def Z(self):
        """
        Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
        timezones west of UTC is always negative, and for those east of UTC is
        always positive.
        """
        if not self.timezone:
            return 0
        offset = self.timezone.utcoffset(self.data)
        # `offset` is a datetime.timedelta. For negative values (to the west of
        # UTC) only days can be negative (days=-1) and seconds are always
        # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
        # Positive offsets have days=0
        return offset.days * 86400 + offset.seconds
+
def format(value, format_string):
    "Convenience function"
    return DateFormat(value).format(format_string)
+
def time_format(value, format_string):
    "Convenience function"
    return TimeFormat(value).format(format_string)
diff --git a/lib/python2.7/site-packages/django/utils/dateparse.py b/lib/python2.7/site-packages/django/utils/dateparse.py
new file mode 100644
index 0000000..b4bd559
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/dateparse.py
@@ -0,0 +1,82 @@
+"""Functions to parse datetime objects."""
+
+# We're using regular expressions rather than time.strptime because:
+# - They provide both validation and parsing.
+# - They're more flexible for datetimes.
+# - The date/datetime/time constructors produce friendlier error messages.
+
+import datetime
+import re
+from django.utils import six
+from django.utils.timezone import utc
+from django.utils.tzinfo import FixedOffset
+
+date_re = re.compile(
+ r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
+)
+
+time_re = re.compile(
+ r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
+ r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
+)
+
+datetime_re = re.compile(
+ r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
+ r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
+ r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
+ r'(?P<tzinfo>Z|[+-]\d{2}:?\d{2})?$'
+)
+
def parse_date(value):
    """Parses a string and return a datetime.date.

    Raises ValueError if the input is well formatted but not a valid date.
    Returns None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if not match:
        return None
    parts = dict((name, int(text))
                 for name, text in six.iteritems(match.groupdict()))
    return datetime.date(**parts)
+
def parse_time(value):
    """Parses a string and return a datetime.time.

    This function doesn't support time zone offsets.

    Raises ValueError if the input is well formatted but not a valid time.
    Returns None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    match = time_re.match(value)
    if not match:
        return None
    groups = match.groupdict()
    if groups['microsecond']:
        # Right-pad so that e.g. '.5' means 500000 microseconds.
        groups['microsecond'] = groups['microsecond'].ljust(6, '0')
    kwargs = dict((k, int(v)) for k, v in six.iteritems(groups)
                  if v is not None)
    return datetime.time(**kwargs)
+
def parse_datetime(value):
    """Parses a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses an instance of FixedOffset as tzinfo.

    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if not match:
        return None
    groups = match.groupdict()
    if groups['microsecond']:
        # Right-pad so that e.g. '.5' means 500000 microseconds.
        groups['microsecond'] = groups['microsecond'].ljust(6, '0')
    tz_str = groups.pop('tzinfo')
    if tz_str == 'Z':
        tz = utc
    elif tz_str is not None:
        # tz_str looks like '+HH:MM', '-HHMM', etc.; offset is in minutes.
        offset = 60 * int(tz_str[1:3]) + int(tz_str[-2:])
        tz = FixedOffset(-offset if tz_str[0] == '-' else offset)
    else:
        tz = None
    kwargs = dict((k, int(v)) for k, v in six.iteritems(groups)
                  if v is not None)
    kwargs['tzinfo'] = tz
    return datetime.datetime(**kwargs)
diff --git a/lib/python2.7/site-packages/django/utils/dates.py b/lib/python2.7/site-packages/django/utils/dates.py
new file mode 100644
index 0000000..a1c0fa2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/dates.py
@@ -0,0 +1,57 @@
+"Commonly-used date structures"
+
+from django.utils.translation import ugettext_lazy as _, pgettext_lazy
+
# Weekday number (0=Monday ... 6=Sunday, matching WEEKDAYS_REV below) to
# translated full name.
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
    5: _('Saturday'), 6: _('Sunday')
}
# Weekday number to translated three-letter abbreviation.
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
    5: _('Sat'), 6: _('Sun')
}
# Lowercase English weekday name back to its number (untranslated keys).
WEEKDAYS_REV = {
    'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
    'saturday': 5, 'sunday': 6
}
# Month number (1-12) to translated full name.
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
    7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
    12: _('December')
}
# Month number to translated lowercase three-letter abbreviation.
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
    7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
# Lowercase English three-letter month abbreviation back to its number
# (untranslated keys).
MONTHS_3_REV = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
    'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = {  # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = {  # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
diff --git a/lib/python2.7/site-packages/django/utils/datetime_safe.py b/lib/python2.7/site-packages/django/utils/datetime_safe.py
new file mode 100644
index 0000000..ca96fb3
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/datetime_safe.py
@@ -0,0 +1,92 @@
+# Python's datetime strftime doesn't handle dates before 1900.
+# These classes override date and datetime to support the formatting of a date
+# through its full "proleptic Gregorian" date range.
+#
+# Based on code submitted to comp.lang.python by Andrew Dalke
+#
+# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
+# '1850/08/02 was a Friday'
+
+from datetime import date as real_date, datetime as real_datetime
+import re
+import time
+
class date(real_date):
    # datetime.date subclass whose strftime also handles years before 1900.
    def strftime(self, fmt):
        # Delegate to the module-level strftime() helper below.
        return strftime(self, fmt)
+
class datetime(real_datetime):
    # datetime.datetime subclass whose strftime also handles years before 1900.
    def strftime(self, fmt):
        # Delegate to the module-level strftime() helper below.
        return strftime(self, fmt)

    @classmethod
    def combine(cls, date, time):
        # Same contract as datetime.datetime.combine(), but constructs this
        # safe subclass instead of the builtin type.
        return cls(date.year, date.month, date.day,
                   time.hour, time.minute, time.second,
                   time.microsecond, time.tzinfo)

    def date(self):
        # Return the date portion as a safe date (the subclass defined above).
        return date(self.year, self.month, self.day)
+
def new_date(d):
    """Build a datetime_safe.date from any datetime.date object."""
    # timetuple()[:3] is (year, month, day).
    return date(*d.timetuple()[:3])
+
def new_datetime(d):
    """
    Generate a safe datetime from a datetime.date or datetime.datetime object.
    """
    args = [d.year, d.month, d.day]
    # Plain dates carry no time information; only datetimes contribute it.
    if isinstance(d, real_datetime):
        args += [d.hour, d.minute, d.second, d.microsecond, d.tzinfo]
    return datetime(*args)
+
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
# The pattern captures an unescaped %s/%y together with its preceding
# non-% character (if any).
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
+
+def _findall(text, substr):
+ # Also finds overlaps
+ sites = []
+ i = 0
+ while 1:
+ j = text.find(substr, i)
+ if j == -1:
+ break
+ sites.append(j)
+ i=j+1
+ return sites
+
def strftime(dt, fmt):
    """strftime() replacement that also works for years before 1900.

    Pre-1900 dates are shifted into the 28-year Gregorian repeat cycle
    near the year 2000, formatted there, and the substitute year in the
    output is then replaced with the real one.

    Raises TypeError if fmt contains "%s" or "%y", which this scheme
    cannot support.
    """
    if dt.year >= 1900:
        # The native implementation handles these years fine.
        return super(type(dt), dt).strftime(fmt)
    illegal_formatting = _illegal_formatting.search(fmt)
    if illegal_formatting:
        # Bug fix: the original message lacked a separating space, producing
        # output like "...does not handlea%y".
        raise TypeError("strftime of dates before 1900 does not handle " +
                        illegal_formatting.group(0))

    year = dt.year
    # For every non-leap year century, advance by
    # 6 years to get into the 28-year repeat cycle
    delta = 2000 - year
    off = 6 * (delta // 100 + delta // 400)
    year = year + off

    # Move to around the year 2000
    year = year + ((2000 - year) // 28) * 28
    timetuple = dt.timetuple()
    s1 = time.strftime(fmt, (year,) + timetuple[1:])
    sites1 = _findall(s1, str(year))

    s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
    sites2 = _findall(s2, str(year+28))

    # Positions where the year shows up for both substitute years are genuine
    # year fields (not accidental digit runs); only those get rewritten.
    sites = []
    for site in sites1:
        if site in sites2:
            sites.append(site)

    s = s1
    syear = "%04d" % (dt.year,)
    for site in sites:
        s = s[:site] + syear + s[site+4:]
    return s
diff --git a/lib/python2.7/site-packages/django/utils/decorators.py b/lib/python2.7/site-packages/django/utils/decorators.py
new file mode 100644
index 0000000..91444f0
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/decorators.py
@@ -0,0 +1,120 @@
+"Functions that help with dynamically creating decorators for views."
+
+from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS
+
+from django.utils import six
+
+
class classonlymethod(classmethod):
    """A classmethod that can only be reached through the class itself,
    never through an instance."""
    def __get__(self, instance, owner):
        # Instance access is a programming error; fail loudly at lookup time.
        if instance is None:
            return super(classonlymethod, self).__get__(instance, owner)
        raise AttributeError("This method is available only on the view class.")
+
+
def method_decorator(decorator):
    """
    Converts a function decorator into a method decorator
    """
    # 'method' is a plain function when handed to _dec, but will eventually be
    # looked up as a method of the class it is defined in.
    def _dec(method):
        def _wrapper(self, *args, **kwargs):
            @decorator
            def bound(*a, **kw):
                # Closure over self: 'decorator' sees a callable with no
                # 'self' in its signature, yet the call still reaches 'method'
                # with the right instance.
                return method(self, *a, **kw)
            return bound(*args, **kwargs)
        # If 'decorator' sets attributes on what it decorates, harvest them by
        # decorating a throwaway function and copying from the result.
        @decorator
        def _placeholder(*args, **kwargs):
            pass
        update_wrapper(_wrapper, _placeholder)
        # The original method's own attributes (notably __name__) take priority.
        update_wrapper(_wrapper, method)
        return _wrapper
    update_wrapper(_dec, decorator)
    # Change the name to aid debugging.
    _dec.__name__ = 'method_decorator(%s)' % decorator.__name__
    return _dec
+
+
def decorator_from_middleware_with_args(middleware_class):
    """
    Like decorator_from_middleware, but returns a function
    that accepts the arguments to be passed to the middleware_class.
    Use like::

        cache_page = decorator_from_middleware_with_args(CacheMiddleware)
        # ...

        @cache_page(3600)
        def my_view(request):
            # ...
    """
    factory = make_middleware_decorator(middleware_class)
    return factory
+
+
def decorator_from_middleware(middleware_class):
    """
    Given a middleware class (not an instance), returns a view decorator. This
    lets you use middleware functionality on a per-view basis. The middleware
    is created with no params passed.
    """
    # Instantiate the factory immediately with no middleware arguments.
    factory = make_middleware_decorator(middleware_class)
    return factory()
+
+
def available_attrs(fn):
    """
    Return the list of functools-wrappable attributes on a callable.
    This is required as a workaround for http://bugs.python.org/issue3445
    under Python 2.
    """
    if not six.PY3:
        # Python 2: some callables lack e.g. __name__; copy only what exists.
        return tuple(attr for attr in WRAPPER_ASSIGNMENTS if hasattr(fn, attr))
    return WRAPPER_ASSIGNMENTS
+
+
def make_middleware_decorator(middleware_class):
    # Factory of factories: returns a callable that instantiates
    # middleware_class with the given arguments, and whose result decorates a
    # single view so that it runs through the full middleware protocol
    # (process_request / process_view / process_exception /
    # process_template_response / process_response).
    def _make_decorator(*m_args, **m_kwargs):
        # One middleware instance is shared by every call to the wrapped view.
        middleware = middleware_class(*m_args, **m_kwargs)
        def _decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                # A non-None result from process_request or process_view
                # short-circuits the view entirely.
                if hasattr(middleware, 'process_request'):
                    result = middleware.process_request(request)
                    if result is not None:
                        return result
                if hasattr(middleware, 'process_view'):
                    result = middleware.process_view(request, view_func, args, kwargs)
                    if result is not None:
                        return result
                try:
                    response = view_func(request, *args, **kwargs)
                except Exception as e:
                    # Let the middleware turn the exception into a response;
                    # otherwise re-raise it unchanged.
                    if hasattr(middleware, 'process_exception'):
                        result = middleware.process_exception(request, e)
                        if result is not None:
                            return result
                    raise
                # Responses with a render() method (template responses) are
                # processed lazily, after rendering.
                if hasattr(response, 'render') and callable(response.render):
                    if hasattr(middleware, 'process_template_response'):
                        response = middleware.process_template_response(request, response)
                    # Defer running of process_response until after the template
                    # has been rendered:
                    if hasattr(middleware, 'process_response'):
                        callback = lambda response: middleware.process_response(request, response)
                        response.add_post_render_callback(callback)
                else:
                    if hasattr(middleware, 'process_response'):
                        return middleware.process_response(request, response)
                return response
            return _wrapped_view
        return _decorator
    return _make_decorator
diff --git a/lib/python2.7/site-packages/django/utils/deprecation.py b/lib/python2.7/site-packages/django/utils/deprecation.py
new file mode 100644
index 0000000..edbb5ca
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/deprecation.py
@@ -0,0 +1,62 @@
+import inspect
+import warnings
+
+
class warn_about_renamed_method(object):
    """Function decorator that makes every call emit a deprecation warning
    pointing callers from the old method name to the new one."""
    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        def wrapped(*args, **kwargs):
            message = "`%s.%s` is deprecated, use `%s` instead." % (
                self.class_name, self.old_method_name, self.new_method_name)
            # stacklevel=2 points the warning at the caller of the old method.
            warnings.warn(message, self.deprecation_warning, 2)
            return f(*args, **kwargs)
        return wrapped
+
+
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.

    It does the following:
        1) Define the new method if missing and complain about it.
        2) Define the old method if missing.
        3) Complain whenever an old method is called.

    See #15363 for more details.
    """

    # Sequence of (old_name, new_name, DeprecationWarning subclass) tuples,
    # meant to be overridden by metaclass subclasses.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)

        # Walk the full MRO so renames also apply to inherited definitions.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                wrapper = warn_about_renamed_method(class_name, *renamed_method)

                # Define the new method if missing and complain about it
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    # Expose the implementation under the new name and leave a
                    # warning wrapper behind under the old one.
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))

                # Define the old method as a wrapped call to the new method.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))

        return new_class
diff --git a/lib/python2.7/site-packages/django/utils/dictconfig.py b/lib/python2.7/site-packages/django/utils/dictconfig.py
new file mode 100644
index 0000000..c7b3981
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/dictconfig.py
@@ -0,0 +1,555 @@
+# This is a copy of the Python logging.config.dictconfig module,
+# reproduced with permission. It is provided here for backwards
+# compatibility for Python versions prior to 2.7.
+#
+# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose and without fee is hereby granted,
+# provided that the above copyright notice appear in all copies and that
+# both that copyright notice and this permission notice appear in
+# supporting documentation, and that the name of Vinay Sajip
+# not be used in advertising or publicity pertaining to distribution
+# of the software without specific, written prior permission.
+# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
+# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
+# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
+# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import logging.handlers
+import re
+import sys
+import types
+
+from django.utils import six
+
# Case-insensitive pattern matching a legal Python identifier.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
def valid_ident(s):
    """Return True if s is a valid Python identifier; raise ValueError if not."""
    if IDENTIFIER.match(s):
        return True
    raise ValueError('Not a valid Python identifier: %r' % s)
+
+#
+# This function is defined in logging only in recent versions of Python
+#
try:
    # Recent Pythons ship this helper in the logging package itself.
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        # Normalize a level given as int or level name to its numeric value.
        # NOTE: logging._levelNames is a Python 2 internal; this fallback is
        # only needed on the old interpreters this module targets.
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
+
+# The ConvertingXXX classes are wrappers around standard Python containers,
+# and they serve to convert any suitable values in the container. The
+# conversion converts base dicts, lists and tuples to their wrapped
+# equivalents, whereas strings which match a conversion format are converted
+# appropriately.
+#
+# Each wrapper should have a configurator attribute holding the actual
+# configurator to use for conversion.
+
class ConvertingDict(dict):
    """A converting dictionary wrapper."""

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Record where the wrapper lives so converters can
                # navigate back to their container.
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        # Unlike __getitem__/get, pop removes the entry, so the converted
        # value is not written back.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
+
class ConvertingList(list):
    """A converting list wrapper."""
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        # The item has been removed, so there is no save-back and no key.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
+
class ConvertingTuple(tuple):
    """A converting tuple wrapper."""
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        # Tuples are immutable, so the converted value cannot be saved back.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
+
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Matches protocol-prefixed strings such as 'ext://sys.stdout'.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Tokens used by cfg_convert to walk dotted/indexed access paths.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps a protocol prefix to the name of the converter method to call.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__

    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # Not an attribute yet: it may be a submodule, so import
                    # the longer dotted path and retry the attribute lookup.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            # Re-raise as ValueError, preserving cause and traceback.
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # Consume '.attr' and '[index]' accesses until the path is used up.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, six.string_types): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # Resolve dotted-string factories; the ClassType check keeps Python 2
        # old-style classes callable without resolution.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            # Entries under '.' are set as attributes on the created object.
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
+
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """

    def configure(self):
        """Do the configuration."""

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # Hold the logging module lock for the whole (re)configuration so
        # other threads never see a half-configured logging tree.
        # NOTE: except StandardError below is Python 2-only, as is this
        # backport module.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                # Full (non-incremental) configuration replaces all handlers.
                logging._handlers.clear()
                del logging._handlerList[:]

                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters

                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        # Collect 'name.*' descendants; sorting above puts
                        # them immediately after 'name'.
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))

                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True

                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()

    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result

    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result

    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))

    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            # User-supplied factory, possibly given as a dotted string.
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result

    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))

    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(_checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)

    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate

    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
+
# Configurator class used by dictConfig(); assign a subclass here to
# customize configuration behavior.
dictConfigClass = DictConfigurator
+
def dictConfig(config):
    """Configure logging using a dictionary."""
    configurator = dictConfigClass(config)
    configurator.configure()
diff --git a/lib/python2.7/site-packages/django/utils/encoding.py b/lib/python2.7/site-packages/django/utils/encoding.py
new file mode 100644
index 0000000..599952b
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/encoding.py
@@ -0,0 +1,253 @@
+from __future__ import unicode_literals
+
+import codecs
+import datetime
+from decimal import Decimal
+import locale
+import warnings
+
+from django.utils.functional import Promise
+from django.utils import six
+from django.utils.six.moves.urllib.parse import quote
+
class DjangoUnicodeDecodeError(UnicodeDecodeError):
    """UnicodeDecodeError that also remembers and reports the object whose
    conversion failed, to ease debugging of bad input data."""

    def __init__(self, obj, *args):
        self.obj = obj
        UnicodeDecodeError.__init__(self, *args)

    def __str__(self):
        base = UnicodeDecodeError.__str__(self)
        return '%s. You passed in %r (%s)' % (base, self.obj, type(self.obj))
+
class StrAndUnicode(object):
    """
    A class that derives __str__ from __unicode__.

    On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
    bytestring. On Python 3, __str__ returns the output of __unicode__.

    Useful as a mix-in. If you support Python 2 and 3 with a single code base,
    you can inherit this mix-in and just define __unicode__.

    Deprecated: new code should define __str__ and use the
    @python_2_unicode_compatible decorator instead (see the warning below).
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
                      "and apply the @python_2_unicode_compatible decorator "
                      "instead.", DeprecationWarning, stacklevel=2)
        super(StrAndUnicode, self).__init__(*args, **kwargs)

    # The branch is taken once, at class-definition time, so only the
    # appropriate __str__ exists on the class for the running interpreter.
    if six.PY3:
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            return self.__unicode__().encode('utf-8')
+
def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.

    Raises ValueError if the decorated class does not define __str__ itself
    (an inherited __str__ is not sufficient: only klass.__dict__ is checked).
    """
    if six.PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        # On py2 the text-returning __str__ becomes __unicode__, and a new
        # __str__ encodes that text as UTF-8 bytes.
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
+
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a text object representing 's' -- unicode on Python 2 and str on
    Python 3. Bytestrings are decoded with the 'encoding' codec.

    Lazy translation objects (Promise) are passed through untouched so they
    are not evaluated prematurely; all other values are delegated to
    force_text. If strings_only is True, some non-string-like objects are
    left as-is.
    """
    if isinstance(s, Promise):
        # Result of a gettext_lazy() call: keep it lazy.
        return s
    return force_text(s, encoding, strings_only, errors)
+
def is_protected_type(obj):
    """Determine if the object instance is of a protected type.

    Objects of protected types are preserved as-is when passed to
    force_text(strings_only=True).
    """
    protected = six.integer_types + (
        type(None),
        float,
        Decimal,
        datetime.datetime,
        datetime.date,
        datetime.time,
    )
    return isinstance(obj, protected)
+
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_text, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.

    Raises DjangoUnicodeDecodeError (a UnicodeDecodeError subclass carrying
    the offending object) when decoding fails, except for Exception inputs,
    which get a best-effort argument-by-argument conversion instead.
    """
    # Handle the common case first, saves 30-40% when s is an instance of
    # six.text_type. This function gets called often in that setting.
    if isinstance(s, six.text_type):
        return s
    if strings_only and is_protected_type(s):
        return s
    try:
        if not isinstance(s, six.string_types):
            if hasattr(s, '__unicode__'):
                s = s.__unicode__()
            else:
                if six.PY3:
                    if isinstance(s, bytes):
                        s = six.text_type(s, encoding, errors)
                    else:
                        s = six.text_type(s)
                else:
                    # py2: go through bytes() so the object's __str__ output
                    # is decoded with the requested encoding.
                    s = six.text_type(bytes(s), encoding, errors)
        else:
            # Note: We use .decode() here, instead of six.text_type(s, encoding,
            # errors), so that if s is a SafeBytes, it ends up being a
            # SafeText at the end.
            s = s.decode(encoding, errors)
    except UnicodeDecodeError as e:
        if not isinstance(s, Exception):
            raise DjangoUnicodeDecodeError(s, *e.args)
        else:
            # If we get to here, the caller has passed in an Exception
            # subclass populated with non-ASCII bytestring data without a
            # working unicode method. Try to handle this without raising a
            # further exception by individually forcing the exception args
            # to unicode.
            s = ' '.join([force_text(arg, encoding, strings_only,
                    errors) for arg in s])
    return s
+
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Return a bytestring version of 's', encoded as specified in 'encoding'.

    Lazy translation objects (Promise) are passed through untouched so they
    are not evaluated prematurely; everything else is delegated to
    force_bytes. If strings_only is True, some non-string-like objects are
    left as-is.
    """
    if isinstance(s, Promise):
        # Result of a gettext_lazy() call: keep it lazy.
        return s
    return force_bytes(s, encoding, strings_only, errors)
+
+
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.

    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # memoryview has no encode/decode; materialize it as bytes first.
    if isinstance(s, six.memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            # Transcode via text; assumes the incoming bytes are UTF-8.
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if isinstance(s, Promise):
        # Evaluate the lazy object, then encode the resulting text.
        return six.text_type(s).encode(encoding, errors)
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return b' '.join([force_bytes(arg, encoding, strings_only,
                        errors) for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
+
# smart_str/force_str operate on the native `str` type of the running
# interpreter: text on Python 3, bytes on Python 2.
if six.PY3:
    smart_str = smart_text
    force_str = force_text
else:
    smart_str = smart_bytes
    force_str = force_bytes
    # backwards compatibility for Python 2
    smart_unicode = smart_text
    force_unicode = force_text

smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.

This is suitable for writing to sys.stdout (for instance).
"""

force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
+
def iri_to_uri(iri):
    """
    Convert an Internationalized Resource Identifier (IRI) portion to a URI
    portion that is suitable for inclusion in a URL.

    This is the algorithm from section 3.1 of RFC 3987, simplified by the
    assumption that the input is already UTF-8 bytes or unicode.

    Returns an ASCII string containing the encoded result; None (and the
    empty string) pass through unchanged.
    """
    if iri is None:
        return iri
    # The safe set is built from RFC 3986's "reserved" (gen-delims plus
    # sub-delims) and "unreserved" characters:
    #     gen-delims  = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    #     sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
    #                 / "*" / "+" / "," / ";" / "="
    #     unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
    # urllib's quote() already treats all unreserved characters except "~"
    # as safe.  "%" is also kept, because the end of RFC 3987 section 3.1
    # specifically says it must not be converted.
    return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
+
def filepath_to_uri(path):
    """
    Convert a file system path to a URI portion that is suitable for
    inclusion in a URL.

    The input is assumed to be UTF-8 bytes or unicode already.  Characters
    that are special in URIs are percent-encoded, except "'" which is valid
    inside URIs (compare JavaScript's encodeURIComponent()).

    Returns an ASCII string containing the encoded result; None passes
    through unchanged.
    """
    if path is None:
        return path
    # Backslashes are normalized to forward slashes directly (rather than
    # via os.sep/os.altsep) so hardcoded separators keep working.
    return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
+
def get_system_encoding():
    """
    Return the encoding of the default system locale.

    Falls back to 'ascii' when the locale reports no encoding, or when the
    reported encoding is unknown to the codecs registry or cannot be
    determined at all. See tickets #10335 and #5846.
    """
    try:
        candidate = locale.getdefaultlocale()[1] or 'ascii'
        # Validate the name against the codecs registry.
        codecs.lookup(candidate)
    except Exception:
        return 'ascii'
    return candidate

# Computed once at import time; the system locale does not change underneath
# a running process.
DEFAULT_LOCALE_ENCODING = get_system_encoding()
diff --git a/lib/python2.7/site-packages/django/utils/feedgenerator.py b/lib/python2.7/site-packages/django/utils/feedgenerator.py
new file mode 100644
index 0000000..13708d4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/feedgenerator.py
@@ -0,0 +1,391 @@
+"""
+Syndication feed generation library -- used for generating RSS, etc.
+
+Sample usage:
+
+>>> from django.utils import feedgenerator
+>>> feed = feedgenerator.Rss201rev2Feed(
+... title="Poynter E-Media Tidbits",
+... link="http://www.poynter.org/column.asp?id=31",
+... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
+... language="en",
+... )
+>>> feed.add_item(
+... title="Hello",
+... link="http://www.holovaty.com/test/",
+... description="Testing."
+... )
+>>> with open('test.rss', 'w') as fp:
+... feed.write(fp, 'utf-8')
+
+For definitions of the different versions of RSS, see:
+http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
+"""
+from __future__ import unicode_literals
+
+import datetime
+from django.utils.xmlutils import SimplerXMLGenerator
+from django.utils.encoding import force_text, iri_to_uri
+from django.utils import datetime_safe
+from django.utils import six
+from django.utils.six import StringIO
+from django.utils.six.moves.urllib.parse import urlparse
+from django.utils.timezone import is_aware
+
def rfc2822_date(date):
    """
    Format a datetime as an RFC 2822 date-time string (e.g. for RSS
    pubDate/lastBuildDate elements).

    Aware datetimes are rendered with their real UTC offset (+HHMM/-HHMM);
    naive ones get the RFC 2822 "-0000" (offset unknown) marker.
    """
    # We can't use strftime() because it produces locale-dependent results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Split the sign off before divmod(): floor division on a negative
        # minute count leaves a positive remainder (e.g. divmod(-330, 60) ==
        # (-6, 30)), which would render -05:30 as "-0630".
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d%02d' % (sign, hour, minute)
    else:
        return time_str + '-0000'
+
def rfc3339_date(date):
    """
    Format a datetime as an RFC 3339 (Atom) timestamp.

    Aware datetimes are rendered with their real UTC offset (+HH:MM/-HH:MM);
    naive ones are suffixed with 'Z'.
    """
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
    if six.PY2:  # strftime returns a byte string in Python 2
        time_str = time_str.decode('utf-8')
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # Split the sign off before divmod(): floor division on a negative
        # minute count leaves a positive remainder (e.g. divmod(-330, 60) ==
        # (-6, 30)), which would render -05:30 as "-06:30".
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d:%02d' % (sign, hour, minute)
    else:
        return time_str + 'Z'
+
def get_tag_uri(url, date):
    """
    Create a TagURI for 'url', with an optional date component.

    See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    bits = urlparse(url)
    if date is not None:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    else:
        date_part = ''
    return 'tag:%s%s:%s/%s' % (bits.hostname, date_part, bits.path, bits.fragment)
+
class SyndicationFeed(object):
    """Base class for all syndication feeds. Subclasses should provide
    write(); the add_root_elements/add_item_elements/*_attributes hooks let
    them emit format-specific markup."""
    def __init__(self, title, link, description, language=None, author_email=None,
            author_name=None, author_link=None, subtitle=None, categories=None,
            feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        # Coerce everything to text up front (links via iri_to_uri) so the
        # XML writer only ever sees text; extra kwargs are stored verbatim.
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [force_text(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        self.feed.update(kwargs)
        self.items = []

    def add_item(self, title, link, description, author_email=None,
        author_name=None, author_link=None, pubdate=None, comments=None,
        unique_id=None, unique_id_is_permalink=None, enclosure=None,
        categories=(), item_copyright=None, ttl=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate, which is a datetime.datetime object, and
        enclosure, which is an instance of the Enclosure class.
        """
        to_unicode = lambda s: force_text(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_text(ttl)
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'unique_id_is_permalink': unique_id_is_permalink,
            'enclosure': enclosure,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        item.update(kwargs)
        self.items.append(item)

    def num_items(self):
        # Number of items added so far.
        return len(self.items)

    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError

    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        s = StringIO()
        self.write(s, encoding)
        return s.getvalue()

    def latest_post_date(self):
        """
        Returns the latest item's pubdate. If none of them have a pubdate,
        this returns the current date/time.
        """
        updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
        if len(updates) > 0:
            updates.sort()
            return updates[-1]
        else:
            return datetime.datetime.now()
+
class Enclosure(object):
    """Represents an RSS enclosure (a media attachment on a feed item)."""

    def __init__(self, url, length, mime_type):
        """All args are expected to be Python Unicode objects."""
        self.length = length
        self.mime_type = mime_type
        self.url = iri_to_uri(url)
+
class RssFeed(SyndicationFeed):
    """Common machinery for RSS feeds; subclasses set _version and emit
    per-item elements via add_item_elements()."""
    mime_type = 'application/rss+xml; charset=utf-8'
    def write(self, outfile, encoding):
        # Serialize the whole feed as <rss><channel>...</channel></rss>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        # The atom namespace is declared for the atom:link self-reference
        # emitted in add_root_elements().
        return {"version": self._version,
                "xmlns:atom": "http://www.w3.org/2005/Atom"}

    def write_items(self, handler):
        # One <item> element per added item; subclasses fill in its children.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        # Channel-level metadata; optional fields are skipped when None.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None,
                    {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")
+
class RssUserland091Feed(RssFeed):
    """RSS 0.91 feed: items carry only title, link and description."""
    _version = "0.91"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        description = item['description']
        if description is not None:
            handler.addQuickElement("description", description)
+
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"
    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information. RSS's <author> wants an email; a name-only
        # author falls back to the Dublin Core dc:creator element.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % \
                (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            guid_attrs = {}
            # isPermaLink is only emitted when explicitly set to a bool.
            if isinstance(item.get('unique_id_is_permalink'), bool):
                guid_attrs['isPermaLink'] = str(
                    item['unique_id_is_permalink']).lower()
            handler.addQuickElement("guid", item['unique_id'], guid_attrs)
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("enclosure", '',
                {"url": item['enclosure'].url, "length": item['enclosure'].length,
                    "type": item['enclosure'].mime_type})

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
+
class Atom1Feed(SyndicationFeed):
    """Atom 1.0 feed."""
    # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
    mime_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"

    def write(self, outfile, encoding):
        # Serialize the whole feed as <feed>...</feed>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")

    def root_attributes(self):
        # xml:lang is only declared when a feed language was given.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}

    def add_root_elements(self, handler):
        # Feed-level metadata; optional fields are skipped when None.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
        if self.feed['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])

    def write_items(self, handler):
        # One <entry> element per added item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            handler.addQuickElement("updated", rfc3339_date(item['pubdate']))

        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")

        # Unique ID. Falls back to a generated tag URI when none was given.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)

        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})

        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("link", '',
                {"rel": "enclosure",
                 "href": item['enclosure'].url,
                 "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})

        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
+
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
diff --git a/lib/python2.7/site-packages/django/utils/formats.py b/lib/python2.7/site-packages/django/utils/formats.py
new file mode 100644
index 0000000..dbe1716
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/formats.py
@@ -0,0 +1,209 @@
+import decimal
+import datetime
+import unicodedata
+
+from django.conf import settings
+from django.utils import dateformat, numberformat, datetime_safe
+from django.utils.importlib import import_module
+from django.utils.encoding import force_str
+from django.utils.functional import lazy
+from django.utils.safestring import mark_safe
+from django.utils import six
+from django.utils.translation import get_language, to_locale, check_for_language
+
# _format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
# _format_modules_cache maps a language code to its list of format modules.
_format_modules_cache = {}

# ISO 8601 input formats that get_format() always appends (if missing) to
# the corresponding locale-provided input-format settings.
ISO_INPUT_FORMATS = {
    'DATE_INPUT_FORMATS': ('%Y-%m-%d',),
    'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M:%S.%f', '%H:%M'),
    'DATETIME_INPUT_FORMATS': (
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M',
        '%Y-%m-%d'
    ),
}
+
def reset_format_cache():
    """Clear any cached formats.

    Provided primarily for testing purposes, so that the effects of cached
    formats can be removed.
    """
    global _format_cache, _format_modules_cache
    _format_cache = {}
    _format_modules_cache = {}
+
def iter_format_modules(lang):
    """
    Does the heavy lifting of finding format modules.

    Yields each importable ``formats`` module for 'lang', trying (in order)
    any configured FORMAT_MODULE_PATH before django.conf.locale, and the
    full locale (e.g. 'pt_BR') before its base language ('pt').  Yields
    nothing when 'lang' is not a known language.
    """
    if check_for_language(lang):
        format_locations = ['django.conf.locale.%s']
        if settings.FORMAT_MODULE_PATH:
            format_locations.append(settings.FORMAT_MODULE_PATH + '.%s')
        # reverse() so the project-level FORMAT_MODULE_PATH is tried first.
        format_locations.reverse()
        locale = to_locale(lang)
        locales = [locale]
        if '_' in locale:
            locales.append(locale.split('_')[0])
        for location in format_locations:
            for loc in locales:
                try:
                    yield import_module('%s.formats' % (location % loc))
                except ImportError:
                    # Missing module for this location/locale: simply skip.
                    pass
+
def get_format_modules(lang=None, reverse=False):
    """
    Return the list of format modules found for 'lang' (defaulting to the
    currently active language).  Results are cached per language; 'reverse'
    returns a reversed copy without disturbing the cached order.
    """
    if lang is None:
        lang = get_language()
    modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang)))
    return list(reversed(modules)) if reverse else modules
+
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.

    Results (including "not found", cached as None) are memoized in
    _format_cache per (format_type, lang).
    """
    format_type = force_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
        cache_key = (format_type, lang)
        try:
            cached = _format_cache[cache_key]
            if cached is not None:
                return cached
            else:
                # A cached None means no format module defines this format:
                # return the general setting by default.
                return getattr(settings, format_type)
        except KeyError:
            # Cache miss: scan the format modules for this language.
            for module in get_format_modules(lang):
                try:
                    val = getattr(module, format_type)
                    # For input formats, make sure the ISO fallbacks are
                    # always accepted in addition to the locale's own.
                    for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
                        if iso_input not in val:
                            if isinstance(val, tuple):
                                val = list(val)
                            val.append(iso_input)
                    _format_cache[cache_key] = val
                    return val
                except AttributeError:
                    # This module doesn't define the format; try the next.
                    pass
            _format_cache[cache_key] = None
    return getattr(settings, format_type)
+
# Lazily-evaluated variant of get_format(); the call is deferred via
# django.utils.functional.lazy until the result is actually used.
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
+
def date_format(value, format=None, use_l10n=None):
    """
    Format a datetime.date or datetime.datetime object using a localizable
    format.

    If use_l10n is provided and is not None, it forces the value to be
    localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, fmt)
+
def time_format(value, format=None, use_l10n=None):
    """
    Format a datetime.time object using a localizable format.

    If use_l10n is provided and is not None, it forces the value to be
    localized (or not), overriding the value of settings.USE_L10N.
    """
    fmt = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, fmt)
+
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """
    Format a numeric value using localization settings.

    If use_l10n is provided and is not None, it forces the value to be
    localized (or not), overriding the value of settings.USE_L10N.
    """
    # Separator/grouping formats are looked up per active language only when
    # localization is in effect; otherwise the global settings are used.
    localized = use_l10n or (use_l10n is None and settings.USE_L10N)
    lang = get_language() if localized else None
    return numberformat.format(
        value,
        get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
        decimal_pos,
        get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
        get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
        force_grouping=force_grouping
    )
+
def localize(value, use_l10n=None):
    """
    Checks if value is a localizable type (date, number...) and returns it
    formatted as a string using current locale format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.

    Non-localizable values are returned unchanged.
    """
    # bool must be tested before the numeric types: bool is a subclass of
    # int and would otherwise be rendered as a number.
    if isinstance(value, bool):
        return mark_safe(six.text_type(value))
    elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value, use_l10n=use_l10n)
    # datetime must be tested before date: datetime is a date subclass.
    elif isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    elif isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    elif isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    else:
        return value
+
def localize_input(value, default=None):
    """
    Check if an input value is a localizable type and return it formatted
    with the appropriate input-format string of the current locale (or the
    'default' format when given).  Other values are returned unchanged.
    """
    if isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value)
    # datetime is checked before date because datetime is a date subclass.
    if isinstance(value, datetime.datetime):
        fmt = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return datetime_safe.new_datetime(value).strftime(fmt)
    if isinstance(value, datetime.date):
        fmt = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return datetime_safe.new_date(value).strftime(fmt)
    if isinstance(value, datetime.time):
        fmt = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(fmt)
    return value
+
def sanitize_separators(value):
    """
    Sanitizes a value according to the current decimal and
    thousand separator setting. Used with form field input.

    Converts a localized number string (e.g. "1.234,56" with German
    separators) back to a '.'-decimal string; non-string values, or any
    value when USE_L10N is off, are returned unchanged.
    """
    if settings.USE_L10N and isinstance(value, six.string_types):
        parts = []
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        if decimal_separator in value:
            # Split off the fractional part; it is re-joined with '.' below.
            value, decimals = value.split(decimal_separator, 1)
            parts.append(decimals)
        if settings.USE_THOUSAND_SEPARATOR:
            thousand_sep = get_format('THOUSAND_SEPARATOR')
            # Also strip the NFKD-normalized form of the separator (e.g. a
            # non-breaking space typed as a plain space).
            for replacement in set([
                    thousand_sep, unicodedata.normalize('NFKD', thousand_sep)]):
                value = value.replace(replacement, '')
        parts.append(value)
        # parts is [decimals, integer]; reversed() restores integer.decimals.
        value = '.'.join(reversed(parts))
    return value
diff --git a/lib/python2.7/site-packages/django/utils/functional.py b/lib/python2.7/site-packages/django/utils/functional.py
new file mode 100644
index 0000000..6c63999
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/functional.py
@@ -0,0 +1,430 @@
+import copy
+import operator
+from functools import wraps
+import sys
+
+from django.utils import six
+from django.utils.six.moves import copyreg
+
+
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
    """Return a function with the given positional/keyword args pre-bound."""
    def _curried(*moreargs, **morekwargs):
        # Later keyword arguments override the pre-bound ones.
        all_kwargs = dict(kwargs)
        all_kwargs.update(morekwargs)
        return _curried_func(*(args + moreargs), **all_kwargs)
    return _curried
+
+
def memoize(func, cache, num_args):
    """
    Wrap a function so that results for any argument tuple are stored in
    'cache'. Note that the args to the function must be usable as dictionary
    keys.

    Only the first num_args are considered when creating the key.
    """
    @wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        # EAFP: a single dict lookup on the hot (cached) path.
        try:
            return cache[key]
        except KeyError:
            pass
        value = func(*args)
        cache[key] = value
        return value
    return wrapper
+
+
class cached_property(object):
    """
    Decorator that converts a method with a single self argument into a
    property cached on the instance.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, type=None):
        # Accessed on the class itself: return the descriptor unchanged.
        if instance is None:
            return self
        # Compute once, then shadow this (non-data) descriptor with a plain
        # instance attribute so later lookups never reach __get__ again.
        value = self.func(instance)
        instance.__dict__[self.func.__name__] = value
        return value
+
+
class Promise(object):
    """
    This is just a base class for the proxy class created in
    the closure of the lazy function. It can be used to recognize
    promises in code.
    """
    # Intentionally empty: the class is only a marker for isinstance()
    # checks (see allow_lazy() below).
    pass
+
+
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """

    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        # Maps resultclass -> {method name -> unbound method}; filled in
        # lazily by __prepare_class__ on first instantiation.
        __dispatch = None

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            if self.__dispatch is None:
                self.__prepare_class__()

        def __reduce__(self):
            # Pickle via the module-level helper, since this class itself
            # is unpicklable (it's defined inside a closure).
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        def __prepare_class__(cls):
            # Copy every method of every resultclass onto the proxy so that
            # calling e.g. .upper() on the proxy forwards to the real result.
            cls.__dispatch = {}
            for resultclass in resultclasses:
                cls.__dispatch[resultclass] = {}
                for type_ in reversed(resultclass.mro()):
                    for (k, v) in type_.__dict__.items():
                        # All __promise__ return the same wrapper method, but
                        # they also do setup, inserting the method into the
                        # dispatch dict.
                        meth = cls.__promise__(resultclass, k, v)
                        if hasattr(cls, k):
                            continue
                        setattr(cls, k, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = six.text_type in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), "Cannot call lazy() with both bytes and text return types."
            # Wire the appropriate string-conversion dunder for this Python
            # version so str()/unicode()/bytes() force evaluation.
            if cls._delegate_text:
                if six.PY3:
                    cls.__str__ = cls.__text_cast
                else:
                    cls.__unicode__ = cls.__text_cast
            elif cls._delegate_bytes:
                if six.PY3:
                    cls.__bytes__ = cls.__bytes_cast
                else:
                    cls.__str__ = cls.__bytes_cast
        __prepare_class__ = classmethod(__prepare_class__)

        def __promise__(cls, klass, funcname, method):
            # Builds a wrapper around some magic method and registers that
            # magic method for the given type and method name.
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                for t in type(res).mro():
                    if t in self.__dispatch:
                        return self.__dispatch[t][funcname](res, *args, **kw)
                raise TypeError("Lazy object returned unexpected type.")

            if klass not in cls.__dispatch:
                cls.__dispatch[klass] = {}
            cls.__dispatch[klass][funcname] = method
            return __wrapper__
        __promise__ = classmethod(__promise__)

        def __text_cast(self):
            """Force evaluation, returning the (text) result as-is."""
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            """Force evaluation and coerce the result to bytes."""
            return bytes(func(*self.__args, **self.__kw))

        def __cast(self):
            """Force evaluation, casting per the declared result type."""
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __eq__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            # __gt__/__le__/__ge__ are filled in by @total_ordering.
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            # %-formatting must go through the declared string type so the
            # result keeps that type (e.g. text vs. bytes).
            if self._delegate_bytes and six.PY2:
                return bytes(self) % rhs
            elif self._delegate_text:
                return six.text_type(self) % rhs
            return self.__cast() % rhs

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)

    return __wrapper__
+
+
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Recreate a lazy proxy on unpickling (see __proxy__.__reduce__)."""
    lazy_func = lazy(func, *resultclasses)
    return lazy_func(*args, **kwargs)
+
+
def allow_lazy(func, *resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Any promise among the arguments forces the whole call to be lazy.
        if any(isinstance(arg, Promise)
               for arg in list(args) + list(six.itervalues(kwargs))):
            return lazy(func, *resultclasses)(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper
+
# Sentinel meaning "the lazy object has not been initialised yet".
empty = object()


def new_method_proxy(func):
    """Return a method that forwards `func` to the wrapped object,
    initialising it on first use."""
    def inner(self, *args):
        wrapped = self._wrapped
        if wrapped is empty:
            self._setup()
            wrapped = self._wrapped
        return func(wrapped, *args)
    return inner
+
+
class LazyObject(object):
    """
    A wrapper for another class that can be used to delay instantiation of the
    wrapped class.

    By subclassing, you have the opportunity to intercept and alter the
    instantiation. If you don't need to do that, use SimpleLazyObject.
    """

    # Avoid infinite recursion when tracing __init__ (#19456).
    _wrapped = None

    def __init__(self):
        # `empty` marks "not initialised yet"; any proxied access below
        # triggers _setup() via new_method_proxy.
        self._wrapped = empty

    __getattr__ = new_method_proxy(getattr)

    def __setattr__(self, name, value):
        """Forward attribute writes to the wrapped object."""
        if name == "_wrapped":
            # Assign to __dict__ to avoid infinite __setattr__ loops.
            self.__dict__["_wrapped"] = value
        else:
            if self._wrapped is empty:
                self._setup()
            setattr(self._wrapped, name, value)

    def __delattr__(self, name):
        """Forward attribute deletion, protecting the sentinel slot."""
        if name == "_wrapped":
            raise TypeError("can't delete _wrapped.")
        if self._wrapped is empty:
            self._setup()
        delattr(self._wrapped, name)

    def _setup(self):
        """
        Must be implemented by subclasses to initialise the wrapped object.
        """
        raise NotImplementedError

    # Introspection support
    __dir__ = new_method_proxy(dir)

    # Dictionary methods support
    @new_method_proxy
    def __getitem__(self, key):
        return self[key]

    @new_method_proxy
    def __setitem__(self, key, value):
        self[key] = value

    @new_method_proxy
    def __delitem__(self, key):
        del self[key]
+
+
# Workaround for http://bugs.python.org/issue12370
_super = super


class SimpleLazyObject(LazyObject):
    """
    A lazy object initialised from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """
    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Write directly to __dict__ so the assignment is not proxied by
        # LazyObject.__setattr__ onto the (not yet created) wrapped object.
        self.__dict__['_setupfunc'] = func
        _super(SimpleLazyObject, self).__init__()

    def _setup(self):
        """Initialise the wrapped object by calling the stored factory."""
        self._wrapped = self._setupfunc()

    if six.PY3:
        __bytes__ = new_method_proxy(bytes)
        __str__ = new_method_proxy(str)
    else:
        __str__ = new_method_proxy(str)
        __unicode__ = new_method_proxy(unicode)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        else:
            return copy.deepcopy(self._wrapped, memo)

    # Because we have messed with __class__ below, we confuse pickle as to what
    # class we are pickling. It also appears to stop __reduce__ from being
    # called. So, we define __getstate__ in a way that cooperates with the way
    # that pickle interprets this class. This fails when the wrapped class is
    # a builtin, but it is better than nothing.
    def __getstate__(self):
        if self._wrapped is empty:
            self._setup()
        return self._wrapped.__dict__

    # Python 3.3 will call __reduce__ when pickling; this method is needed
    # to serialize and deserialize correctly.
    @classmethod
    def __newobj__(cls, *args):
        return cls.__new__(cls, *args)

    def __reduce_ex__(self, proto):
        if proto >= 2:
            # On Py3, since the default protocol is 3, pickle uses the
            # ``__newobj__`` method (& more efficient opcodes) for writing.
            return (self.__newobj__, (self.__class__,), self.__getstate__())
        else:
            # On Py2, the default protocol is 0 (for back-compat) & the above
            # code fails miserably (see regression test). Instead, we return
            # exactly what's returned if there's no ``__reduce__`` method at
            # all.
            return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())

    # Return a meaningful representation of the lazy object for debugging
    # without evaluating the wrapped object.
    def __repr__(self):
        if self._wrapped is empty:
            repr_attr = self._setupfunc
        else:
            repr_attr = self._wrapped
        return '<SimpleLazyObject: %r>' % repr_attr

    # Need to pretend to be the wrapped class, for the sake of objects that
    # care about this (especially in equality tests)
    __class__ = property(new_method_proxy(operator.attrgetter("__class__")))
    __eq__ = new_method_proxy(operator.eq)
    __ne__ = new_method_proxy(operator.ne)
    __hash__ = new_method_proxy(hash)
    __bool__ = new_method_proxy(bool)  # Python 3
    __nonzero__ = __bool__  # Python 2
+
+
class lazy_property(property):
    """
    A property that works with subclasses by wrapping the decorated
    functions of the base class.
    """
    def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
        # Each accessor is replaced by a shim that re-looks the method up
        # on the instance by name, so subclass overrides take effect.
        if fget is not None:
            original_get = fget
            @wraps(original_get)
            def fget(instance, instance_type=None, name=original_get.__name__):
                return getattr(instance, name)()
        if fset is not None:
            original_set = fset
            @wraps(original_set)
            def fset(instance, value, name=original_set.__name__):
                return getattr(instance, name)(value)
        if fdel is not None:
            original_del = fdel
            @wraps(original_del)
            def fdel(instance, name=original_del.__name__):
                return getattr(instance, name)()
        # Note: returns a plain property, not a lazy_property instance.
        return property(fget, fset, fdel, doc)
+
+
def partition(predicate, values):
    """
    Splits the values into two sets, based on the return value of the function
    (True/False). e.g.:

    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
    """
    results = ([], [])
    for item in values:
        # bool() guards against truthy non-bool returns (e.g. ints > 1),
        # which would otherwise raise IndexError when used as an index.
        results[bool(predicate(item))].append(item)
    return results
+
if sys.version_info >= (2, 7, 2):
    from functools import total_ordering
else:
    # For Python < 2.7.2. Python 2.6 does not have total_ordering, and
    # total_ordering in 2.7 versions prior to 2.7.2 is buggy. See
    # http://bugs.python.org/issue10042 for details. For these versions use
    # code borrowed from Python 2.7.3.
    def total_ordering(cls):
        """Class decorator that fills in missing ordering methods"""
        # For each ordering method the class may define, the table lists the
        # other three methods expressed in terms of it (plus __eq__).
        convert = {
            '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
                       ('__le__', lambda self, other: self < other or self == other),
                       ('__ge__', lambda self, other: not self < other)],
            '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
                       ('__lt__', lambda self, other: self <= other and not self == other),
                       ('__gt__', lambda self, other: not self <= other)],
            '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
                       ('__ge__', lambda self, other: self > other or self == other),
                       ('__le__', lambda self, other: not self > other)],
            '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
                       ('__gt__', lambda self, other: self >= other and not self == other),
                       ('__lt__', lambda self, other: not self >= other)]
        }
        roots = set(dir(cls)) & set(convert)
        if not roots:
            raise ValueError('must define at least one ordering operation: < > <= >=')
        root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
        for opname, opfunc in convert[root]:
            if opname not in roots:
                opfunc.__name__ = opname
                opfunc.__doc__ = getattr(int, opname).__doc__
                setattr(cls, opname, opfunc)
        return cls
diff --git a/lib/python2.7/site-packages/django/utils/html.py b/lib/python2.7/site-packages/django/utils/html.py
new file mode 100644
index 0000000..5d96f15
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/html.py
@@ -0,0 +1,330 @@
+"""HTML utilities suitable for global use."""
+
+from __future__ import unicode_literals
+
+import re
+
+from django.utils.safestring import SafeData, mark_safe
+from django.utils.encoding import force_text, force_str
+from django.utils.functional import allow_lazy
+from django.utils import six
+from django.utils.six.moves.urllib.parse import quote, unquote, urlsplit, urlunsplit
+from django.utils.text import normalize_newlines
+
+from .html_parser import HTMLParser, HTMLParseError
+
+
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]

# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

# An ampersand that is not already part of an entity reference.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# Splits on whitespace, keeping the separators (capturing group).
word_split_re = re.compile(r'(\s+)')
# Obvious absolute URLs (http/https; \[? allows an IPv6 bracket host).
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
# www.-prefixed or bare domains ending in one of the original seven gTLDs.
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
# Loose email check used by urlize(); not a full RFC 5322 validator.
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
# A target=... attribute inside an <a> tag (used by clean_html).
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
# Presentational no-op markup stripped by clean_html().
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
# Runs of <p> paragraphs starting with one of DOTS (hard-coded bullets).
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
# Empty/whitespace-only paragraphs at the very end of the text.
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
+
+
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
    """
    # '&' must be replaced first so the entities introduced below are not
    # themselves re-escaped.
    replacements = (
        ('&', '&amp;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        ('"', '&quot;'),
        ("'", '&#39;'),
    )
    escaped = force_text(text)
    for char, entity in replacements:
        escaped = escaped.replace(char, entity)
    return mark_safe(escaped)
escape = allow_lazy(escape, six.text_type)
+
# Translation table for escapejs(): maps code points (str.translate on text
# requires integer keys) to \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    # U+2028/U+2029 (line/paragraph separator) are escaped too -- they are
    # line terminators inside JavaScript string literals.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}

# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))

def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
+
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.
    """
    # SafeData marks strings already escaped (or declared safe); leave them.
    return text if isinstance(text, SafeData) else escape(text)
+
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    args_safe = map(conditional_escape, args)
    # Dict comprehension instead of dict([...]) -- same result, no
    # intermediate list of pairs (available on all supported Pythons).
    kwargs_safe = {k: conditional_escape(v) for (k, v) in six.iteritems(kwargs)}
    return mark_safe(format_string.format(*args_safe, **kwargs_safe))
+
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
                                                  for u in users))

    """
    safe_sep = conditional_escape(sep)
    joined = safe_sep.join(
        format_html(format_string, *tuple(args))
        for args in args_generator
    )
    return mark_safe(joined)
+
+
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    # Two or more consecutive newlines separate paragraphs; single
    # newlines inside a paragraph become <br />.
    converted = []
    for paragraph in re.split('\n{2,}', value):
        if autoescape:
            paragraph = escape(paragraph)
        converted.append('<p>%s</p>' % paragraph.replace('\n', '<br />'))
    return '\n\n'.join(converted)
linebreaks = allow_lazy(linebreaks, six.text_type)
+
+
class MLStripper(HTMLParser):
    # Parser that records everything except the tags themselves; used by
    # strip_tags() via _strip_once().
    def __init__(self):
        if six.PY2:
            HTMLParser.__init__(self)
        else:
            # strict=False keeps Python 3's parser from raising on most
            # malformed markup.
            HTMLParser.__init__(self, strict=False)
        self.reset()
        self.fed = []  # collected non-tag fragments, joined by get_data()
    def handle_data(self, d):
        self.fed.append(d)
    def handle_entityref(self, name):
        # Re-emit entity references verbatim instead of resolving them.
        self.fed.append('&%s;' % name)
    def handle_charref(self, name):
        self.fed.append('&#%s;' % name)
    def get_data(self):
        return ''.join(self.fed)
+
+
def _strip_once(value):
    """
    Internal tag stripping utility used by strip_tags.

    Runs the parser over ``value`` once, returning the collected non-tag
    data; on parser failure, returns as much as could be salvaged.
    """
    s = MLStripper()
    try:
        s.feed(value)
    except HTMLParseError:
        # Input the parser cannot handle at all: return it unchanged.
        return value
    try:
        s.close()
    except (HTMLParseError, UnboundLocalError) as err:
        # UnboundLocalError because of http://bugs.python.org/issue17802
        # on Python 3.2, triggered by strict=False mode of HTMLParser
        return s.get_data() + s.rawdata
    else:
        return s.get_data()
+
+
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    # Loop because stripping once can expose new tags (e.g. nested or
    # obfuscated markup); stop when a pass makes no progress.
    while '<' in value or '>' in value:
        new_value = _strip_once(value)
        if new_value == value:
            # _strip_once was not able to detect more tags
            break
        value = new_value
    return value
# Pass six.text_type like every other allow_lazy() wrapper in this module;
# without a resultclass the lazy proxy has no delegated string methods.
strip_tags = allow_lazy(strip_tags, six.text_type)
+
def remove_tags(html, tags):
    """Returns the given HTML with given tags removed."""
    # `tags` is a space-separated list of tag names.
    escaped = '|'.join(re.escape(tag) for tag in tags.split())
    tags_re = '(%s)' % escaped
    starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
    endtag_re = re.compile('</%s>' % tags_re)
    return endtag_re.sub('', starttag_re.sub('', html))
remove_tags = allow_lazy(remove_tags, six.text_type)
+
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    text = force_text(value)
    return re.sub(r'>\s+<', '><', text)
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
+
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    text = force_text(value)
    return re.sub(r'&(?:\w+|#\d+);', '', text)
strip_entities = allow_lazy(strip_entities, six.text_type)
+
def fix_ampersands(value):
    """Returns the given HTML with all unencoded ampersands encoded correctly."""
    text = force_text(value)
    return unencoded_ampersands_re.sub('&amp;', text)
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
+
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
        try:
            netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
        except UnicodeError:  # invalid domain part
            pass
        else:
            url = urlunsplit((scheme, netloc, path, query, fragment))
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        pass

    # Unquote first so already-quoted input is not double-quoted.
    url = unquote(force_str(url))
    # See http://bugs.python.org/issue2637
    url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')

    return force_text(url)
+
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will truncated to trim_url_limit-3 characters and appended with an elipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    def trim_url(x, limit=trim_url_limit):
        # Shorten link text to `limit` characters, ellipsis included.
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]
    safe_input = isinstance(text, SafeData)
    # word_split_re keeps the whitespace separators, so ''.join(words)
    # below reconstructs the original spacing.
    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(middle)
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % middle)
            elif ':' not in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
+
def clean_html(text):
    """
    Clean the given HTML. Specifically, do the following:
    * Convert <b> and <i> to <strong> and <em>.
    * Encode all ampersands correctly.
    * Remove all "target" attributes from <a> tags.
    * Remove extraneous HTML, such as presentational tags that open and
      immediately close and <br clear="all">.
    * Convert hard-coded bullets into HTML unordered lists.
    * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
      bottom of the text.
    """
    # normalize_newlines is imported at module level; the former
    # function-local re-import was redundant.
    text = normalize_newlines(force_text(text))
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)
    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)
    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, six.text_type)
+
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    # Rejoin on U+00A0 (non-breaking space); equivalent to a plain replace.
    return "\xa0".join(value.split(" "))
diff --git a/lib/python2.7/site-packages/django/utils/html_parser.py b/lib/python2.7/site-packages/django/utils/html_parser.py
new file mode 100644
index 0000000..6ccb665
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/html_parser.py
@@ -0,0 +1,113 @@
+from django.utils.six.moves import html_parser as _html_parser
+import re
+import sys
+
current_version = sys.version_info

# The stdlib HTMLParser in these interpreter versions lacks the fix for
# http://bugs.python.org/issue670664 (cdata end-tag handling), so the
# patched subclass below is used instead.
use_workaround = (
    (current_version < (2, 7, 3)) or
    (current_version >= (3, 0) and current_version < (3, 2, 3))
)

# Re-exported so callers can catch parse errors without importing six.moves.
HTMLParseError = _html_parser.HTMLParseError
+
if not use_workaround:
    # Recent-enough stdlib: use it directly.
    HTMLParser = _html_parser.HTMLParser
else:
    tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')

    class HTMLParser(_html_parser.HTMLParser):
        """
        Patched version of stdlib's HTMLParser with patch from:
        http://bugs.python.org/issue670664
        """
        def __init__(self):
            _html_parser.HTMLParser.__init__(self)
            # Name of the tag whose raw (CDATA) content we are inside,
            # or None when in normal parsing mode.
            self.cdata_tag = None

        def set_cdata_mode(self, tag):
            # Inside raw-content elements only the matching close tag is
            # "interesting"; everything else is character data.
            try:
                self.interesting = _html_parser.interesting_cdata
            except AttributeError:
                self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
            self.cdata_tag = tag.lower()

        def clear_cdata_mode(self):
            self.interesting = _html_parser.interesting_normal
            self.cdata_tag = None

        # Internal -- handle starttag, return end or -1 if not terminated
        def parse_starttag(self, i):
            self.__starttag_text = None
            endpos = self.check_for_whole_start_tag(i)
            if endpos < 0:
                return endpos
            rawdata = self.rawdata
            self.__starttag_text = rawdata[i:endpos]

            # Now parse the data between i+1 and j into a tag and attrs
            attrs = []
            match = tagfind.match(rawdata, i + 1)
            assert match, 'unexpected call to parse_starttag()'
            k = match.end()
            self.lasttag = tag = match.group(1).lower()

            while k < endpos:
                m = _html_parser.attrfind.match(rawdata, k)
                if not m:
                    break
                attrname, rest, attrvalue = m.group(1, 2, 3)
                if not rest:
                    attrvalue = None
                elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                        attrvalue[:1] == '"' == attrvalue[-1:]:
                    attrvalue = attrvalue[1:-1]
                if attrvalue:
                    attrvalue = self.unescape(attrvalue)
                attrs.append((attrname.lower(), attrvalue))
                k = m.end()

            end = rawdata[k:endpos].strip()
            if end not in (">", "/>"):
                # Report an accurate position for the error, accounting for
                # newlines inside the start tag text.
                lineno, offset = self.getpos()
                if "\n" in self.__starttag_text:
                    lineno = lineno + self.__starttag_text.count("\n")
                    offset = len(self.__starttag_text) \
                        - self.__starttag_text.rfind("\n")
                else:
                    offset = offset + len(self.__starttag_text)
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            if end.endswith('/>'):
                # XHTML-style empty tag: <span attr="value" />
                self.handle_startendtag(tag, attrs)
            else:
                self.handle_starttag(tag, attrs)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    self.set_cdata_mode(tag)  # <--------------------------- Changed
            return endpos

        # Internal -- parse endtag, return end or -1 if incomplete
        def parse_endtag(self, i):
            rawdata = self.rawdata
            assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
            match = _html_parser.endendtag.search(rawdata, i + 1)  # >
            if not match:
                return -1
            j = match.end()
            match = _html_parser.endtagfind.match(rawdata, i)  # </ + tag + >
            if not match:
                if self.cdata_tag is not None:  # *** add ***
                    self.handle_data(rawdata[i:j])  # *** add ***
                    return j  # *** add ***
                self.error("bad end tag: %r" % (rawdata[i:j],))
            # --- changed start ---------------------------------------------------
            tag = match.group(1).strip()
            if self.cdata_tag is not None:
                # A non-matching end tag inside CDATA is plain data.
                if tag.lower() != self.cdata_tag:
                    self.handle_data(rawdata[i:j])
                    return j
            # --- changed end -----------------------------------------------------
            self.handle_endtag(tag.lower())
            self.clear_cdata_mode()
            return j
diff --git a/lib/python2.7/site-packages/django/utils/http.py b/lib/python2.7/site-packages/django/utils/http.py
new file mode 100644
index 0000000..571a179
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/http.py
@@ -0,0 +1,261 @@
+from __future__ import unicode_literals
+
+import base64
+import calendar
+import datetime
+import re
+import sys
+
+from binascii import Error as BinasciiError
+from email.utils import formatdate
+
+from django.utils.datastructures import MultiValueDict
+from django.utils.encoding import force_str, force_text
+from django.utils.functional import allow_lazy
+from django.utils import six
+from django.utils.six.moves.urllib.parse import (
+ quote, quote_plus, unquote, unquote_plus, urlparse,
+ urlencode as original_urlencode)
+
+ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
+
+MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
+__D = r'(?P<day>\d{2})'
+__D2 = r'(?P<day>[ \d]\d)'
+__M = r'(?P<mon>\w{3})'
+__Y = r'(?P<year>\d{4})'
+__Y2 = r'(?P<year>\d{2})'
+__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
+RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
+RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
+ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
+
+def urlquote(url, safe='/'):
+ """
+ A version of Python's urllib.quote() function that can operate on unicode
+ strings. The url is first UTF-8 encoded before quoting. The returned string
+ can safely be used as part of an argument to a subsequent iri_to_uri() call
+ without double-quoting occurring.
+ """
+ return force_text(quote(force_str(url), force_str(safe)))
+urlquote = allow_lazy(urlquote, six.text_type)
+
+def urlquote_plus(url, safe=''):
+ """
+ A version of Python's urllib.quote_plus() function that can operate on
+ unicode strings. The url is first UTF-8 encoded before quoting. The
+ returned string can safely be used as part of an argument to a subsequent
+ iri_to_uri() call without double-quoting occurring.
+ """
+ return force_text(quote_plus(force_str(url), force_str(safe)))
+urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
+
+def urlunquote(quoted_url):
+ """
+ A wrapper for Python's urllib.unquote() function that can operate on
+ the result of django.utils.http.urlquote().
+ """
+ return force_text(unquote(force_str(quoted_url)))
+urlunquote = allow_lazy(urlunquote, six.text_type)
+
+def urlunquote_plus(quoted_url):
+ """
+ A wrapper for Python's urllib.unquote_plus() function that can operate on
+ the result of django.utils.http.urlquote_plus().
+ """
+ return force_text(unquote_plus(force_str(quoted_url)))
+urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
+
+def urlencode(query, doseq=0):
+ """
+ A version of Python's urllib.urlencode() function that can operate on
+ unicode strings. The parameters are first cast to UTF-8 encoded strings and
+ then encoded as per normal.
+ """
+ if isinstance(query, MultiValueDict):
+ query = query.lists()
+ elif hasattr(query, 'items'):
+ query = query.items()
+ return original_urlencode(
+ [(force_str(k),
+ [force_str(i) for i in v] if isinstance(v, (list,tuple)) else force_str(v))
+ for k, v in query],
+ doseq)
+
+def cookie_date(epoch_seconds=None):
+ """
+ Formats the time to ensure compatibility with Netscape's cookie standard.
+
+ Accepts a floating point number expressed in seconds since the epoch, in
+ UTC - such as that outputted by time.time(). If set to None, defaults to
+ the current time.
+
+ Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
+ """
+ rfcdate = formatdate(epoch_seconds)
+ return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
+
+def http_date(epoch_seconds=None):
+ """
+ Formats the time to match the RFC1123 date format as specified by HTTP
+ RFC2616 section 3.3.1.
+
+ Accepts a floating point number expressed in seconds since the epoch, in
+ UTC - such as that outputted by time.time(). If set to None, defaults to
+ the current time.
+
+ Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
+ """
+ rfcdate = formatdate(epoch_seconds)
+ return '%s GMT' % rfcdate[:25]
+
+def parse_http_date(date):
+ """
+ Parses a date format as specified by HTTP RFC2616 section 3.3.1.
+
+ The three formats allowed by the RFC are accepted, even if only the first
+ one is still in widespread use.
+
+ Returns an integer expressed in seconds since the epoch, in UTC.
+ """
+    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
+ # RFC2616 makes it mandatory to support RFC850 dates too. So we roll
+ # our own RFC-compliant parsing.
+ for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
+ m = regex.match(date)
+ if m is not None:
+ break
+ else:
+ raise ValueError("%r is not in a valid HTTP date format" % date)
+ try:
+ year = int(m.group('year'))
+ if year < 100:
+ if year < 70:
+ year += 2000
+ else:
+ year += 1900
+ month = MONTHS.index(m.group('mon').lower()) + 1
+ day = int(m.group('day'))
+ hour = int(m.group('hour'))
+ min = int(m.group('min'))
+ sec = int(m.group('sec'))
+ result = datetime.datetime(year, month, day, hour, min, sec)
+ return calendar.timegm(result.utctimetuple())
+ except Exception:
+ six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
+
+def parse_http_date_safe(date):
+ """
+ Same as parse_http_date, but returns None if the input is invalid.
+ """
+ try:
+ return parse_http_date(date)
+ except Exception:
+ pass
+
+# Base 36 functions: useful for generating compact URLs
+
+def base36_to_int(s):
+ """
+    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
+ input won't fit into an int.
+ """
+ # To prevent overconsumption of server resources, reject any
+    # base36 string that is longer than 13 base36 digits (13 digits
+ # is sufficient to base36-encode any 64-bit integer)
+ if len(s) > 13:
+ raise ValueError("Base36 input too large")
+ value = int(s, 36)
+ # ... then do a final check that the value will fit into an int to avoid
+ # returning a long (#15067). The long type was removed in Python 3.
+ if six.PY2 and value > sys.maxint:
+ raise ValueError("Base36 input too large")
+ return value
+
+def int_to_base36(i):
+ """
+ Converts an integer to a base36 string
+ """
+ digits = "0123456789abcdefghijklmnopqrstuvwxyz"
+ factor = 0
+ if i < 0:
+ raise ValueError("Negative base36 conversion input.")
+ if six.PY2:
+ if not isinstance(i, six.integer_types):
+ raise TypeError("Non-integer base36 conversion input.")
+ if i > sys.maxint:
+ raise ValueError("Base36 conversion input too large.")
+ # Find starting factor
+ while True:
+ factor += 1
+ if i < 36 ** factor:
+ factor -= 1
+ break
+ base36 = []
+ # Construct base36 representation
+ while factor >= 0:
+ j = 36 ** factor
+ base36.append(digits[i // j])
+ i = i % j
+ factor -= 1
+ return ''.join(base36)
+
+def urlsafe_base64_encode(s):
+ """
+ Encodes a bytestring in base64 for use in URLs, stripping any trailing
+ equal signs.
+ """
+ return base64.urlsafe_b64encode(s).rstrip(b'\n=')
+
+def urlsafe_base64_decode(s):
+ """
+ Decodes a base64 encoded string, adding back any trailing equal signs that
+ might have been stripped.
+ """
+ s = s.encode('utf-8') # base64encode should only return ASCII.
+ try:
+ return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
+ except (LookupError, BinasciiError) as e:
+ raise ValueError(e)
+
+def parse_etags(etag_str):
+ """
+ Parses a string with one or several etags passed in If-None-Match and
+ If-Match headers by the rules in RFC 2616. Returns a list of etags
+ without surrounding double quotes (") and unescaped from \<CHAR>.
+ """
+ etags = ETAG_MATCH.findall(etag_str)
+ if not etags:
+ # etag_str has wrong format, treat it as an opaque string then
+ return [etag_str]
+ etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
+ return etags
+
+def quote_etag(etag):
+ """
+ Wraps a string in double quotes escaping contents as necessary.
+ """
+ return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
+
+def same_origin(url1, url2):
+ """
+ Checks if two URLs are 'same-origin'
+ """
+ p1, p2 = urlparse(url1), urlparse(url2)
+ try:
+ return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
+ except ValueError:
+ return False
+
+def is_safe_url(url, host=None):
+ """
+ Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
+ a different host and uses a safe scheme).
+
+ Always returns ``False`` on an empty url.
+ """
+ if not url:
+ return False
+ url_info = urlparse(url)
+ return (not url_info.netloc or url_info.netloc == host) and \
+ (not url_info.scheme or url_info.scheme in ['http', 'https'])
diff --git a/lib/python2.7/site-packages/django/utils/image.py b/lib/python2.7/site-packages/django/utils/image.py
new file mode 100644
index 0000000..4bece2e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/image.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+"""
+To provide a shim layer over Pillow/PIL situation until the PIL support is
+removed.
+
+
+Combinations To Account For
+===========================
+
+* Pillow:
+
+ * never has ``_imaging`` under any Python
+ * has the ``Image.alpha_composite``, which may aid in detection
+
+* PIL
+
+ * CPython 2.x may have _imaging (& work)
+  * CPython 2.x may *NOT* have _imaging (broken & needs an error message)
+ * CPython 3.x doesn't work
+ * PyPy will *NOT* have _imaging (but works?)
+ * On some platforms (Homebrew and RHEL6 reported) _imaging isn't available,
+ the needed import is from PIL import _imaging (refs #21355)
+
+Restated, that looks like:
+
+* If we're on Python 2.x, it could be either Pillow or PIL:
+
+ * If ``import _imaging`` results in ``ImportError``, either they have a
+ working Pillow installation or a broken PIL installation, so we need to
+ detect further:
+
+ * To detect, we first ``import Image``.
+    * If ``Image`` has an ``alpha_composite`` attribute present, only Pillow
+ has this, so we assume it's working.
+    * If ``Image`` DOES NOT have an ``alpha_composite`` attribute, it must be
+ PIL & is a broken (likely C compiler-less) install, which we need to
+ warn the user about.
+
+ * If ``import _imaging`` works, it must be PIL & is a working install.
+
+* Python 3.x
+
+ * If ``import Image`` works, it must be Pillow, since PIL isn't Python 3.x
+ compatible.
+
+* PyPy
+
+ * If ``import _imaging`` results in ``ImportError``, it could be either
+ Pillow or PIL, both of which work without it on PyPy, so we're fine.
+
+
+Approach
+========
+
+* Attempt to import ``Image``
+
+ * ``ImportError`` - nothing is installed, toss an exception
+ * Either Pillow or the PIL is installed, so continue detecting
+
+* Attempt to ``hasattr(Image, 'alpha_composite')``
+
+ * If it works, it's Pillow & working
+ * If it fails, we've got a PIL install, continue detecting
+
+ * The only option here is that we're on Python 2.x or PyPy, of which
+ we only care about if we're on CPython.
+ * If we're on CPython, attempt to ``from PIL import _imaging`` and
+ ``import _imaging``
+
+ * ``ImportError`` - Bad install, toss an exception
+
+"""
+from __future__ import unicode_literals
+
+import warnings
+
+from django.core.exceptions import ImproperlyConfigured
+from django.utils.translation import ugettext_lazy as _
+
+
+Image = None
+_imaging = None
+ImageFile = None
+
+
+def _detect_image_library():
+ global Image
+ global _imaging
+ global ImageFile
+
+ # Skip re-attempting to import if we've already run detection.
+ if Image is not None:
+ return Image, _imaging, ImageFile
+
+ # Assume it's not there.
+ PIL_imaging = False
+
+ try:
+ # Try from the Pillow (or one variant of PIL) install location first.
+ from PIL import Image as PILImage
+ except ImportError as err:
+ try:
+ # If that failed, try the alternate import syntax for PIL.
+ import Image as PILImage
+ except ImportError as err:
+ # Neither worked, so it's likely not installed.
+ raise ImproperlyConfigured(
+ _("Neither Pillow nor PIL could be imported: %s") % err
+ )
+
+ # ``Image.alpha_composite`` was added to Pillow in SHA: e414c6 & is not
+ # available in any version of the PIL.
+ if hasattr(PILImage, 'alpha_composite'):
+ PIL_imaging = False
+ else:
+ # We're dealing with the PIL. Determine if we're on CPython & if
+ # ``_imaging`` is available.
+ import platform
+
+ # This is the Alex Approvedâ„¢ way.
+ # See http://mail.python.org/pipermail//pypy-dev/2011-November/008739.html
+ if platform.python_implementation().lower() == 'cpython':
+ # We're on CPython (likely 2.x). Since a C compiler is needed to
+ # produce a fully-working PIL & will create a ``_imaging`` module,
+ # we'll attempt to import it to verify their kit works.
+ try:
+ from PIL import _imaging as PIL_imaging
+ except ImportError:
+ try:
+ import _imaging as PIL_imaging
+ except ImportError as err:
+ raise ImproperlyConfigured(
+ _("The '_imaging' module for the PIL could not be "
+ "imported: %s") % err
+ )
+
+ # Try to import ImageFile as well.
+ try:
+ from PIL import ImageFile as PILImageFile
+ except ImportError:
+ import ImageFile as PILImageFile
+
+ # Finally, warn about deprecation...
+ if PIL_imaging is not False:
+ warnings.warn(
+ "Support for the PIL will be removed in Django 1.8. Please " +
+ "uninstall it & install Pillow instead.",
+ PendingDeprecationWarning
+ )
+
+ return PILImage, PIL_imaging, PILImageFile
+
+
+Image, _imaging, ImageFile = _detect_image_library()
diff --git a/lib/python2.7/site-packages/django/utils/importlib.py b/lib/python2.7/site-packages/django/utils/importlib.py
new file mode 100644
index 0000000..ae7987c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/importlib.py
@@ -0,0 +1,41 @@
+# Taken from Python 2.7 with permission from/by the original author.
+import sys
+
+from django.utils import six
+
+def _resolve_name(name, package, level):
+ """Return the absolute name of the module to be imported."""
+ if not hasattr(package, 'rindex'):
+ raise ValueError("'package' not set to a string")
+ dot = len(package)
+ for x in range(level, 1, -1):
+ try:
+ dot = package.rindex('.', 0, dot)
+ except ValueError:
+ raise ValueError("attempted relative import beyond top-level "
+ "package")
+ return "%s.%s" % (package[:dot], name)
+
+
+if six.PY3:
+ from importlib import import_module
+else:
+ def import_module(name, package=None):
+ """Import a module.
+
+ The 'package' argument is required when performing a relative import. It
+ specifies the package to use as the anchor point from which to resolve the
+ relative import to an absolute import.
+
+ """
+ if name.startswith('.'):
+ if not package:
+ raise TypeError("relative imports require the 'package' argument")
+ level = 0
+ for character in name:
+ if character != '.':
+ break
+ level += 1
+ name = _resolve_name(name[level:], package, level)
+ __import__(name)
+ return sys.modules[name]
diff --git a/lib/python2.7/site-packages/django/utils/ipv6.py b/lib/python2.7/site-packages/django/utils/ipv6.py
new file mode 100644
index 0000000..4d53522
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/ipv6.py
@@ -0,0 +1,268 @@
+# This code was mostly based on ipaddr-py
+# Copyright 2007 Google Inc. http://code.google.com/p/ipaddr-py/
+# Licensed under the Apache License, Version 2.0 (the "License").
+from django.core.exceptions import ValidationError
+from django.utils.translation import ugettext_lazy as _
+from django.utils.six.moves import xrange
+
+def clean_ipv6_address(ip_str, unpack_ipv4=False,
+ error_message=_("This is not a valid IPv6 address.")):
+ """
+    Cleans an IPv6 address string.
+
+ Validity is checked by calling is_valid_ipv6_address() - if an
+ invalid address is passed, ValidationError is raised.
+
+    Replaces the longest continuous zero-sequence with "::" and
+ removes leading zeroes and makes sure all hextets are lowercase.
+
+ Args:
+ ip_str: A valid IPv6 address.
+ unpack_ipv4: if an IPv4-mapped address is found,
+ return the plain IPv4 address (default=False).
+        error_message: An error message used in the ValidationError.
+
+ Returns:
+ A compressed IPv6 address, or the same value
+
+ """
+ best_doublecolon_start = -1
+ best_doublecolon_len = 0
+ doublecolon_start = -1
+ doublecolon_len = 0
+
+ if not is_valid_ipv6_address(ip_str):
+ raise ValidationError(error_message, code='invalid')
+
+ # This algorithm can only handle fully exploded
+ # IP strings
+ ip_str = _explode_shorthand_ip_string(ip_str)
+
+ ip_str = _sanitize_ipv4_mapping(ip_str)
+
+ # If needed, unpack the IPv4 and return straight away
+ # - no need in running the rest of the algorithm
+ if unpack_ipv4:
+ ipv4_unpacked = _unpack_ipv4(ip_str)
+
+ if ipv4_unpacked:
+ return ipv4_unpacked
+
+ hextets = ip_str.split(":")
+
+ for index in range(len(hextets)):
+ # Remove leading zeroes
+ hextets[index] = hextets[index].lstrip('0')
+ if not hextets[index]:
+ hextets[index] = '0'
+
+ # Determine best hextet to compress
+ if hextets[index] == '0':
+ doublecolon_len += 1
+ if doublecolon_start == -1:
+ # Start of a sequence of zeros.
+ doublecolon_start = index
+ if doublecolon_len > best_doublecolon_len:
+ # This is the longest sequence of zeros so far.
+ best_doublecolon_len = doublecolon_len
+ best_doublecolon_start = doublecolon_start
+ else:
+ doublecolon_len = 0
+ doublecolon_start = -1
+
+ # Compress the most suitable hextet
+ if best_doublecolon_len > 1:
+ best_doublecolon_end = (best_doublecolon_start +
+ best_doublecolon_len)
+ # For zeros at the end of the address.
+ if best_doublecolon_end == len(hextets):
+ hextets += ['']
+ hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+ # For zeros at the beginning of the address.
+ if best_doublecolon_start == 0:
+ hextets = [''] + hextets
+
+ result = ":".join(hextets)
+
+ return result.lower()
+
+
+def _sanitize_ipv4_mapping(ip_str):
+ """
+    Sanitize IPv4 mapping in an expanded IPv6 address.
+
+ This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10.
+ If there is nothing to sanitize, returns an unchanged
+ string.
+
+ Args:
+ ip_str: A string, the expanded IPv6 address.
+
+ Returns:
+ The sanitized output string, if applicable.
+ """
+ if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
+ # not an ipv4 mapping
+ return ip_str
+
+ hextets = ip_str.split(':')
+
+ if '.' in hextets[-1]:
+ # already sanitized
+ return ip_str
+
+ ipv4_address = "%d.%d.%d.%d" % (
+ int(hextets[6][0:2], 16),
+ int(hextets[6][2:4], 16),
+ int(hextets[7][0:2], 16),
+ int(hextets[7][2:4], 16),
+ )
+
+ result = ':'.join(hextets[0:6])
+ result += ':' + ipv4_address
+
+ return result
+
+def _unpack_ipv4(ip_str):
+ """
+ Unpack an IPv4 address that was mapped in a compressed IPv6 address.
+
+ This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10.
+    If there is nothing to unpack, returns None.
+
+ Args:
+ ip_str: A string, the expanded IPv6 address.
+
+ Returns:
+ The unpacked IPv4 address, or None if there was nothing to unpack.
+ """
+ if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'):
+ return None
+
+ return ip_str.rsplit(':', 1)[1]
+
+def is_valid_ipv6_address(ip_str):
+ """
+ Ensure we have a valid IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A boolean, True if this is a valid IPv6 address.
+
+ """
+ from django.core.validators import validate_ipv4_address
+
+ # We need to have at least one ':'.
+ if ':' not in ip_str:
+ return False
+
+ # We can only have one '::' shortener.
+ if ip_str.count('::') > 1:
+ return False
+
+ # '::' should be encompassed by start, digits or end.
+ if ':::' in ip_str:
+ return False
+
+ # A single colon can neither start nor end an address.
+ if ((ip_str.startswith(':') and not ip_str.startswith('::')) or
+ (ip_str.endswith(':') and not ip_str.endswith('::'))):
+ return False
+
+ # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)
+ if ip_str.count(':') > 7:
+ return False
+
+ # If we have no concatenation, we need to have 8 fields with 7 ':'.
+ if '::' not in ip_str and ip_str.count(':') != 7:
+ # We might have an IPv4 mapped address.
+ if ip_str.count('.') != 3:
+ return False
+
+ ip_str = _explode_shorthand_ip_string(ip_str)
+
+ # Now that we have that all squared away, let's check that each of the
+ # hextets are between 0x0 and 0xFFFF.
+ for hextet in ip_str.split(':'):
+ if hextet.count('.') == 3:
+ # If we have an IPv4 mapped address, the IPv4 portion has to
+ # be at the end of the IPv6 portion.
+ if not ip_str.split(':')[-1] == hextet:
+ return False
+ try:
+ validate_ipv4_address(hextet)
+ except ValidationError:
+ return False
+ else:
+ try:
+ # a value error here means that we got a bad hextet,
+ # something like 0xzzzz
+ if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:
+ return False
+ except ValueError:
+ return False
+ return True
+
+
+def _explode_shorthand_ip_string(ip_str):
+ """
+ Expand a shortened IPv6 address.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A string, the expanded IPv6 address.
+
+ """
+ if not _is_shorthand_ip(ip_str):
+ # We've already got a longhand ip_str.
+ return ip_str
+
+ new_ip = []
+ hextet = ip_str.split('::')
+
+ # If there is a ::, we need to expand it with zeroes
+ # to get to 8 hextets - unless there is a dot in the last hextet,
+ # meaning we're doing v4-mapping
+ if '.' in ip_str.split(':')[-1]:
+ fill_to = 7
+ else:
+ fill_to = 8
+
+ if len(hextet) > 1:
+ sep = len(hextet[0].split(':')) + len(hextet[1].split(':'))
+ new_ip = hextet[0].split(':')
+
+ for _ in xrange(fill_to - sep):
+ new_ip.append('0000')
+ new_ip += hextet[1].split(':')
+
+ else:
+ new_ip = ip_str.split(':')
+
+ # Now need to make sure every hextet is 4 lower case characters.
+ # If a hextet is < 4 characters, we've got missing leading 0's.
+ ret_ip = []
+ for hextet in new_ip:
+ ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower())
+ return ':'.join(ret_ip)
+
+
+def _is_shorthand_ip(ip_str):
+ """Determine if the address is shortened.
+
+ Args:
+ ip_str: A string, the IPv6 address.
+
+ Returns:
+ A boolean, True if the address is shortened.
+
+ """
+ if ip_str.count('::') == 1:
+ return True
+ if any(len(x) < 4 for x in ip_str.split(':')):
+ return True
+ return False
diff --git a/lib/python2.7/site-packages/django/utils/itercompat.py b/lib/python2.7/site-packages/django/utils/itercompat.py
new file mode 100644
index 0000000..c50dcfb
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/itercompat.py
@@ -0,0 +1,36 @@
+"""
+Providing iterator functions that are not in all versions of Python we support.
+Where possible, we try to use the system-native version and only fall back to
+these implementations if necessary.
+"""
+
+import collections
+import itertools
+import sys
+import warnings
+
+
+def is_iterable(x):
+ "A implementation independent way of checking for iterables"
+ try:
+ iter(x)
+ except TypeError:
+ return False
+ else:
+ return True
+
+def is_iterator(x):
+ """An implementation independent way of checking for iterators
+
+ Python 2.6 has a different implementation of collections.Iterator which
+    accepts anything with a `next` method. 2.7+ requires an `__iter__` method
+ as well.
+ """
+ if sys.version_info >= (2, 7):
+ return isinstance(x, collections.Iterator)
+ return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')
+
+def product(*args, **kwds):
+ warnings.warn("django.utils.itercompat.product is deprecated; use the native version instead",
+ DeprecationWarning, stacklevel=2)
+ return itertools.product(*args, **kwds)
diff --git a/lib/python2.7/site-packages/django/utils/jslex.py b/lib/python2.7/site-packages/django/utils/jslex.py
new file mode 100644
index 0000000..c465647
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/jslex.py
@@ -0,0 +1,219 @@
+"""JsLex: a lexer for Javascript"""
+# Originally from https://bitbucket.org/ned/jslex
+import re
+
+class Tok(object):
+ """
+ A specification for a token class.
+ """
+ num = 0
+
+ def __init__(self, name, regex, next=None):
+ self.id = Tok.num
+ Tok.num += 1
+ self.name = name
+ self.regex = regex
+ self.next = next
+
+def literals(choices, prefix="", suffix=""):
+ """
+ Create a regex from a space-separated list of literal `choices`.
+
+ If provided, `prefix` and `suffix` will be attached to each choice
+ individually.
+
+ """
+ return "|".join(prefix+re.escape(c)+suffix for c in choices.split())
+
+
+class Lexer(object):
+ """
+ A generic multi-state regex-based lexer.
+ """
+
+ def __init__(self, states, first):
+ self.regexes = {}
+ self.toks = {}
+
+ for state, rules in states.items():
+ parts = []
+ for tok in rules:
+ groupid = "t%d" % tok.id
+ self.toks[groupid] = tok
+ parts.append("(?P<%s>%s)" % (groupid, tok.regex))
+ self.regexes[state] = re.compile("|".join(parts), re.MULTILINE|re.VERBOSE)
+
+ self.state = first
+
+ def lex(self, text):
+ """
+ Lexically analyze `text`.
+
+ Yields pairs (`name`, `tokentext`).
+ """
+ end = len(text)
+ state = self.state
+ regexes = self.regexes
+ toks = self.toks
+ start = 0
+
+ while start < end:
+ for match in regexes[state].finditer(text, start):
+ name = match.lastgroup
+ tok = toks[name]
+ toktext = match.group(name)
+ start += len(toktext)
+ yield (tok.name, toktext)
+
+ if tok.next:
+ state = tok.next
+ break
+
+ self.state = state
+
+
+class JsLexer(Lexer):
+ """
+ A Javascript lexer
+
+ >>> lexer = JsLexer()
+ >>> list(lexer.lex("a = 1"))
+ [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
+
+    This doesn't properly handle non-ASCII characters in the Javascript source.
+ """
+
+ # Because these tokens are matched as alternatives in a regex, longer
+ # possibilities must appear in the list before shorter ones, for example,
+ # '>>' before '>'.
+ #
+ # Note that we don't have to detect malformed Javascript, only properly
+ # lex correct Javascript, so much of this is simplified.
+
+ # Details of Javascript lexical structure are taken from
+ # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
+
+ # A useful explanation of automatic semicolon insertion is at
+ # http://inimino.org/~inimino/blog/javascript_semicolons
+
+ both_before = [
+ Tok("comment", r"/\*(.|\n)*?\*/"),
+ Tok("linecomment", r"//.*?$"),
+ Tok("ws", r"\s+"),
+ Tok("keyword", literals("""
+ break case catch class const continue debugger
+ default delete do else enum export extends
+ finally for function if import in instanceof
+ new return super switch this throw try typeof
+ var void while with
+ """, suffix=r"\b"), next='reg'),
+ Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
+ Tok("id", r"""
+ ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
+ ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
+ """, next='div'),
+ Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
+ Tok("onum", r"0[0-7]+"),
+ Tok("dnum", r"""
+ ( (0|[1-9][0-9]*) # DecimalIntegerLiteral
+ \. # dot
+ [0-9]* # DecimalDigits-opt
+ ([eE][-+]?[0-9]+)? # ExponentPart-opt
+ |
+ \. # dot
+ [0-9]+ # DecimalDigits
+ ([eE][-+]?[0-9]+)? # ExponentPart-opt
+ |
+ (0|[1-9][0-9]*) # DecimalIntegerLiteral
+ ([eE][-+]?[0-9]+)? # ExponentPart-opt
+ )
+ """, next='div'),
+ Tok("punct", literals("""
+ >>>= === !== >>> <<= >>= <= >= == != << >> &&
+ || += -= *= %= &= |= ^=
+ """), next="reg"),
+ Tok("punct", literals("++ -- ) ]"), next='div'),
+ Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
+ Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
+ Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
+ ]
+
+ both_after = [
+ Tok("other", r"."),
+ ]
+
+ states = {
+ 'div': # slash will mean division
+ both_before + [
+ Tok("punct", literals("/= /"), next='reg'),
+ ] + both_after,
+
+ 'reg': # slash will mean regex
+ both_before + [
+ Tok("regex",
+ r"""
+ / # opening slash
+ # First character is..
+ ( [^*\\/[] # anything but * \ / or [
+ | \\. # or an escape sequence
+ | \[ # or a class, which has
+ ( [^\]\\] # anything but \ or ]
+ | \\. # or an escape sequence
+ )* # many times
+ \]
+ )
+ # Following characters are same, except for excluding a star
+ ( [^\\/[] # anything but \ / or [
+ | \\. # or an escape sequence
+ | \[ # or a class, which has
+ ( [^\]\\] # anything but \ or ]
+ | \\. # or an escape sequence
+ )* # many times
+ \]
+ )* # many times
+ / # closing slash
+ [a-zA-Z0-9]* # trailing flags
+ """, next='div'),
+ ] + both_after,
+ }
+
+ def __init__(self):
+ super(JsLexer, self).__init__(self.states, 'reg')
+
+
+def prepare_js_for_gettext(js):
+ """
+ Convert the Javascript source `js` into something resembling C for
+ xgettext.
+
+ What actually happens is that all the regex literals are replaced with
+ "REGEX".
+ """
+ def escape_quotes(m):
+ """Used in a regex to properly escape double quotes."""
+ s = m.group(0)
+ if s == '"':
+ return r'\"'
+ else:
+ return s
+
+ lexer = JsLexer()
+ c = []
+ for name, tok in lexer.lex(js):
+ if name == 'regex':
+ # C doesn't grok regexes, and they aren't needed for gettext,
+ # so just output a string instead.
+ tok = '"REGEX"';
+ elif name == 'string':
+ # C doesn't have single-quoted strings, so make all strings
+ # double-quoted.
+ if tok.startswith("'"):
+ guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
+ tok = '"' + guts + '"'
+ elif name == 'id':
+ # C can't deal with Unicode escapes in identifiers. We don't
+ # need them for gettext anyway, so replace them with something
+ # innocuous
+ tok = tok.replace("\\", "U");
+ c.append(tok)
+ return ''.join(c)
diff --git a/lib/python2.7/site-packages/django/utils/log.py b/lib/python2.7/site-packages/django/utils/log.py
new file mode 100644
index 0000000..6734a72
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/log.py
@@ -0,0 +1,160 @@
+import logging
+import traceback
+
+from django.conf import settings
+from django.core import mail
+from django.core.mail import get_connection
+from django.views.debug import ExceptionReporter, get_exception_reporter_filter
+
+
+# Make sure a NullHandler is available
+# This was added in Python 2.7/3.2
+try:
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+# Make sure that dictConfig is available
+# This was added in Python 2.7/3.2
+try:
+ from logging.config import dictConfig
+except ImportError:
+ from django.utils.dictconfig import dictConfig
+
+getLogger = logging.getLogger
+
+# Default logging for Django. This sends an email to the site admins on every
+# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
+# the console (DEBUG=True) or discarded by means of the NullHandler (DEBUG=False).
+DEFAULT_LOGGING = {
+ 'version': 1,
+ 'disable_existing_loggers': False,
+ 'filters': {
+ 'require_debug_false': {
+ '()': 'django.utils.log.RequireDebugFalse',
+ },
+ 'require_debug_true': {
+ '()': 'django.utils.log.RequireDebugTrue',
+ },
+ },
+ 'handlers': {
+ 'console': {
+ 'level': 'INFO',
+ 'filters': ['require_debug_true'],
+ 'class': 'logging.StreamHandler',
+ },
+ 'null': {
+ 'class': 'django.utils.log.NullHandler',
+ },
+ 'mail_admins': {
+ 'level': 'ERROR',
+ 'filters': ['require_debug_false'],
+ 'class': 'django.utils.log.AdminEmailHandler'
+ }
+ },
+ 'loggers': {
+ 'django': {
+ 'handlers': ['console'],
+ },
+ 'django.request': {
+ 'handlers': ['mail_admins'],
+ 'level': 'ERROR',
+ 'propagate': False,
+ },
+ 'django.security': {
+ 'handlers': ['mail_admins'],
+ 'level': 'ERROR',
+ 'propagate': False,
+ },
+ 'py.warnings': {
+ 'handlers': ['console'],
+ },
+ }
+}
+
+
+class AdminEmailHandler(logging.Handler):
+ """An exception log handler that emails log entries to site admins.
+
+ If the request is passed as the first argument to the log record,
+ request data will be provided in the email report.
+ """
+
+ def __init__(self, include_html=False, email_backend=None):
+ logging.Handler.__init__(self)
+ self.include_html = include_html
+ self.email_backend = email_backend
+
+ def emit(self, record):
+ try:
+ request = record.request
+ subject = '%s (%s IP): %s' % (
+ record.levelname,
+ ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
+ else 'EXTERNAL'),
+ record.getMessage()
+ )
+ filter = get_exception_reporter_filter(request)
+ request_repr = filter.get_request_repr(request)
+ except Exception:
+ subject = '%s: %s' % (
+ record.levelname,
+ record.getMessage()
+ )
+ request = None
+ request_repr = "Request repr() unavailable."
+ subject = self.format_subject(subject)
+
+ if record.exc_info:
+ exc_info = record.exc_info
+ stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
+ else:
+ exc_info = (None, record.getMessage(), None)
+ stack_trace = 'No stack trace available'
+
+ message = "%s\n\n%s" % (stack_trace, request_repr)
+ reporter = ExceptionReporter(request, is_email=True, *exc_info)
+ html_message = reporter.get_traceback_html() if self.include_html else None
+ mail.mail_admins(subject, message, fail_silently=True,
+ html_message=html_message,
+ connection=self.connection())
+
+ def connection(self):
+ return get_connection(backend=self.email_backend, fail_silently=True)
+
+ def format_subject(self, subject):
+ """
+ Escape CR and LF characters, and limit length.
+ RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
+ the actual subject must be no longer than 989 characters.
+ """
+ formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
+ return formatted_subject[:989]
+
+
+class CallbackFilter(logging.Filter):
+ """
+ A logging filter that checks the return value of a given callable (which
+ takes the record-to-be-logged as its only parameter) to decide whether to
+ log a record.
+
+ """
+ def __init__(self, callback):
+ self.callback = callback
+
+ def filter(self, record):
+ if self.callback(record):
+ return 1
+ return 0
+
+
+class RequireDebugFalse(logging.Filter):
+ def filter(self, record):
+ return not settings.DEBUG
+
+
+class RequireDebugTrue(logging.Filter):
+ def filter(self, record):
+ return settings.DEBUG
diff --git a/lib/python2.7/site-packages/django/utils/module_loading.py b/lib/python2.7/site-packages/django/utils/module_loading.py
new file mode 100644
index 0000000..ede585e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/module_loading.py
@@ -0,0 +1,98 @@
+import imp
+import os
+import sys
+
+from django.core.exceptions import ImproperlyConfigured
+from django.utils import six
+from django.utils.importlib import import_module
+
+
def import_by_path(dotted_path, error_prefix=''):
    """
    Import a dotted module path and return the attribute/class designated
    by the last name in the path.

    Raises ImproperlyConfigured (with error_prefix prepended to the
    message) when the path is malformed, the module cannot be imported,
    or the module lacks the requested attribute.
    """
    if '.' not in dotted_path:
        # rsplit would not yield two parts; same condition as the
        # ValueError that unpacking would raise.
        raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
            error_prefix, dotted_path))
    module_path, class_name = dotted_path.rsplit('.', 1)
    try:
        module = import_module(module_path)
    except ImportError as e:
        msg = '%sError importing module %s: "%s"' % (
            error_prefix, module_path, e)
        # Re-raise with the original traceback so the real import failure
        # stays visible.
        six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
            sys.exc_info()[2])
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImproperlyConfigured('%sModule "%s" does not define a "%s" attribute/class' % (
            error_prefix, module_path, class_name))
+
+
def module_has_submodule(package, module_name):
    """See if 'module_name' is a submodule of 'package', without importing it."""
    name = ".".join([package.__name__, module_name])
    try:
        # None indicates a cached miss; see mark_miss() in Python/import.c.
        return sys.modules[name] is not None
    except KeyError:
        pass
    try:
        package_path = package.__path__  # No __path__, then not a package.
    except AttributeError:
        # The remainder of this function assumes that we're dealing with
        # a package (a module with a __path__), so if it's not, bail here.
        return False
    # Ask each meta-path finder (PEP 302) before falling back to per-entry
    # path finders.
    for finder in sys.meta_path:
        if finder.find_module(name, package_path):
            return True
    for entry in package_path:
        try:
            # Try the cached finder.
            finder = sys.path_importer_cache[entry]
            if finder is None:
                # Implicit import machinery should be used.
                try:
                    # imp.find_module may return an open file handle; close
                    # it, we only care that the module exists.
                    file_, _, _ = imp.find_module(module_name, [entry])
                    if file_:
                        file_.close()
                    return True
                except ImportError:
                    continue
            # Else see if the finder knows of a loader.
            elif finder.find_module(name):
                return True
            else:
                continue
        except KeyError:
            # No cached finder, so try and make one.
            for hook in sys.path_hooks:
                try:
                    finder = hook(entry)
                    # XXX Could cache in sys.path_importer_cache
                    if finder.find_module(name):
                        return True
                    else:
                        # Once a finder is found, stop the search.
                        break
                except ImportError:
                    # Continue the search for a finder.
                    continue
            else:
                # No finder found.
                # Try the implicit import machinery if searching a directory.
                if os.path.isdir(entry):
                    try:
                        file_, _, _ = imp.find_module(module_name, [entry])
                        if file_:
                            file_.close()
                        return True
                    except ImportError:
                        pass
                # XXX Could insert None or NullImporter
    else:
        # Exhausted the search, so the module cannot be found.
        return False
diff --git a/lib/python2.7/site-packages/django/utils/numberformat.py b/lib/python2.7/site-packages/django/utils/numberformat.py
new file mode 100644
index 0000000..6a31237
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/numberformat.py
@@ -0,0 +1,48 @@
+from django.conf import settings
+from django.utils.safestring import mark_safe
+from django.utils import six
+
+
def format(number, decimal_sep, decimal_pos=None, grouping=0, thousand_sep='',
        force_grouping=False):
    """
    Format a number (given as a number or a string) as a string, using the
    formats defined as arguments:

    * decimal_sep: decimal separator symbol (for example ".")
    * decimal_pos: number of decimal positions (truncates, then zero-pads)
    * grouping: number of digits in every group limited by thousand separator
    * thousand_sep: thousand separator symbol (for example ",")
    * force_grouping: apply grouping regardless of the L10N settings
    """
    # Settings are read unconditionally (as in the original flow) so that
    # misconfiguration surfaces the same way regardless of force_grouping.
    use_grouping = ((settings.USE_L10N and settings.USE_THOUSAND_SEPARATOR)
                    or force_grouping) and grouping > 0
    # Fast path for plain ints that need no separators or padding.
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(six.text_type(number))
    text = six.text_type(number)
    sign = ''
    if text[0] == '-':
        sign = '-'
        text = text[1:]
    # Split into integer and decimal parts.
    if '.' in text:
        int_part, dec_part = text.split('.')
        if decimal_pos is not None:
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = text, ''
    if decimal_pos is not None:
        # Zero-pad on the right up to the requested precision.
        dec_part = dec_part.ljust(decimal_pos, '0')
    if dec_part:
        dec_part = decimal_sep + dec_part
    if use_grouping:
        # Slice groups of `grouping` digits off the right-hand end, then
        # join them back left-to-right with the separator.
        pieces = []
        remaining = int_part
        while remaining:
            pieces.append(remaining[-grouping:])
            remaining = remaining[:-grouping]
        int_part = thousand_sep.join(reversed(pieces))
    return sign + int_part + dec_part
diff --git a/lib/python2.7/site-packages/django/utils/regex_helper.py b/lib/python2.7/site-packages/django/utils/regex_helper.py
new file mode 100644
index 0000000..7b40d14
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/regex_helper.py
@@ -0,0 +1,342 @@
+"""
+Functions for reversing a regular expression (used in reverse URL resolving).
+Used internally by Django and not intended for external use.
+
+This is not, and is not intended to be, a complete reg-exp decompiler. It
+should be good enough for a large class of URLS, however.
+"""
+from __future__ import unicode_literals
+
+from django.utils import six
+from django.utils.six.moves import zip
+
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
# Zero-width assertions ("A", "b", "B", "Z") map to None: they contribute no
# text to a reversed URL.
ESCAPE_MAPPINGS = {
    "A": None,
    "b": None,
    "B": None,
    "d": "0",
    "D": "x",
    "s": " ",
    "S": "x",
    "w": "x",
    "W": "!",
    "Z": None,
}
+
class Choice(list):
    """
    Used to represent multiple possibilities at this point in a pattern string.
    We use a distinguished type, rather than a list, so that the usage in the
    code is clear.

    A Choice of [None, x] encodes an optional occurrence of x.
    """
+
class Group(list):
    """
    Used to represent a capturing group in the pattern string.

    Holds a ("%(name)s" format string, parameter-name-or-None) pair.
    """
+
class NonCapture(list):
    """
    Used to represent a non-capturing group in the pattern string.

    Contents are merged into every alternative by flatten_result().
    """
+
def normalize(pattern):
    """
    Given a reg-exp pattern, normalizes it to an iterable of forms that
    suffice for reverse matching. This does the following:

    (1) For any repeating sections, keeps the minimum number of occurrences
        permitted (this means zero for optional groups).
    (2) If an optional group includes parameters, include one occurrence of
        that group (along with the zero occurrence case from step (1)).
    (3) Select the first (essentially an arbitrary) element from any character
        class. Select an arbitrary character for any unordered class (e.g. '.'
        or '\w') in the pattern.
    (4) Ignore comments and any of the reg-exp flags that won't change
        what we construct ("iLmsu"). "(?x)" is an error, however.
    (5) Raise an error on all other non-capturing (?...) forms (e.g.
        look-ahead and look-behind matches) and any disjunctive ('|')
        constructs.

    Django's URLs for forward resolving are either all positional arguments or
    all keyword arguments. That is assumed here, as well. Although reverse
    resolving can be done using positional args when keyword args are
    specified, the two cannot be mixed in the same reverse() call.
    """
    # Do a linear scan to work out the special features of this pattern. The
    # idea is that we scan once here and collect all the information we need to
    # make future decisions.
    result = []
    non_capturing_groups = []
    consume_next = True
    pattern_iter = next_char(iter(pattern))
    num_args = 0

    # A "while" loop is used here because later on we need to be able to peek
    # at the next character and possibly go around without consuming another
    # one at the top of the loop.
    try:
        ch, escaped = next(pattern_iter)
    except StopIteration:
        # Empty pattern: one empty possibility with no arguments.
        return [('', [])]

    try:
        while True:
            if escaped:
                result.append(ch)
            elif ch == '.':
                # Replace "any character" with an arbitrary representative.
                result.append(".")
            elif ch == '|':
                # FIXME: One day we should do this, but not in 1.0.
                raise NotImplementedError
            elif ch == "^":
                # Anchors contribute nothing to the reversed text.
                pass
            elif ch == '$':
                break
            elif ch == ')':
                # This can only be the end of a non-capturing group, since all
                # other unescaped parentheses are handled by the grouping
                # section later (and the full group is handled there).
                #
                # We regroup everything inside the capturing group so that it
                # can be quantified, if necessary.
                start = non_capturing_groups.pop()
                inner = NonCapture(result[start:])
                result = result[:start] + [inner]
            elif ch == '[':
                # Replace ranges with the first character in the range.
                ch, escaped = next(pattern_iter)
                result.append(ch)
                # Skip the rest of the character class up to the closing ']'.
                ch, escaped = next(pattern_iter)
                while escaped or ch != ']':
                    ch, escaped = next(pattern_iter)
            elif ch == '(':
                # Some kind of group.
                ch, escaped = next(pattern_iter)
                if ch != '?' or escaped:
                    # A positional group
                    name = "_%d" % num_args
                    num_args += 1
                    result.append(Group((("%%(%s)s" % name), name)))
                    walk_to_end(ch, pattern_iter)
                else:
                    ch, escaped = next(pattern_iter)
                    if ch in "iLmsu#":
                        # All of these are ignorable. Walk to the end of the
                        # group.
                        walk_to_end(ch, pattern_iter)
                    elif ch == ':':
                        # Non-capturing group
                        non_capturing_groups.append(len(result))
                    elif ch != 'P':
                        # Anything else, other than a named group, is something
                        # we cannot reverse.
                        raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
                    else:
                        ch, escaped = next(pattern_iter)
                        if ch not in ('<', '='):
                            raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
                        # We are in a named capturing group. Extract the name
                        # and then skip to the end.
                        if ch == '<':
                            terminal_char = '>'
                        # We are in a named backreference.
                        else:
                            terminal_char = ')'
                        name = []
                        ch, escaped = next(pattern_iter)
                        while ch != terminal_char:
                            name.append(ch)
                            ch, escaped = next(pattern_iter)
                        param = ''.join(name)
                        # Named backreferences have already consumed the
                        # parenthesis.
                        if terminal_char != ')':
                            result.append(Group((("%%(%s)s" % param), param)))
                            walk_to_end(ch, pattern_iter)
                        else:
                            result.append(Group((("%%(%s)s" % param), None)))
            elif ch in "*?+{":
                # Quantifiers affect the previous item in the result list.
                count, ch = get_quantifier(ch, pattern_iter)
                if ch:
                    # We had to look ahead, but it wasn't needed to compute
                    # the quantifier, so use this character next time around
                    # the main loop.
                    consume_next = False

                if count == 0:
                    if contains(result[-1], Group):
                        # If we are quantifying a capturing group (or
                        # something containing such a group) and the minimum is
                        # zero, we must also handle the case of one occurrence
                        # being present. All the quantifiers (except {0,0},
                        # which we conveniently ignore) that have a 0 minimum
                        # also allow a single occurrence.
                        result[-1] = Choice([None, result[-1]])
                    else:
                        result.pop()
                elif count > 1:
                    result.extend([result[-1]] * (count - 1))
            else:
                # Anything else is a literal.
                result.append(ch)

            if consume_next:
                ch, escaped = next(pattern_iter)
            else:
                consume_next = True
    except StopIteration:
        pass
    except NotImplementedError:
        # A case of using the disjunctive form. No results for you!
        return [('', [])]

    return list(zip(*flatten_result(result)))
+
def next_char(input_iter):
    """
    An iterator that yields the next character from "input_iter", respecting
    escape sequences. An escaped character is replaced by a representative of
    its class (e.g. \w -> "x"). If the escaped character is one that is
    skipped, it is not returned (the next character is returned instead).

    Yields (character, escaped) pairs, where escaped is True when the
    character is the representative of an escape sequence and False when it
    is a raw (unescaped) character.
    """
    for raw in input_iter:
        if raw == '\\':
            # Consume the escaped character and map it to its class
            # representative; None means drop the sequence entirely.
            follower = next(input_iter)
            mapped = ESCAPE_MAPPINGS.get(follower, follower)
            if mapped is not None:
                yield mapped, True
        else:
            yield raw, False
+
def walk_to_end(ch, input_iter):
    """
    The iterator is currently inside a capturing group. Walk to the close of
    this group, skipping over nested groups and handling escaped parentheses
    correctly.
    """
    # If we entered on an opening parenthesis, that group must be closed too.
    depth = 1 if ch == '(' else 0
    for ch, escaped in input_iter:
        if escaped:
            continue
        if ch == '(':
            depth += 1
        elif ch == ')':
            if depth == 0:
                return
            depth -= 1
+
def get_quantifier(ch, input_iter):
    """
    Parse a quantifier from the input, where "ch" is the first character in
    the quantifier.

    Returns the minimum number of occurrences permitted by the quantifier and
    either None or the next character from the input_iter if the next
    character is not part of the quantifier.
    """
    if ch in '*?+':
        # Single-character quantifier; peek one ahead for a non-greedy '?'.
        try:
            following, _ = next(input_iter)
        except StopIteration:
            following = None
        if following == '?':
            following = None
        return (1 if ch == '+' else 0), following

    # '{m}' / '{m,n}' form: collect everything up to the closing brace.
    body = []
    while ch != '}':
        ch, _ = next(input_iter)
        body.append(ch)
    values = ''.join(body[:-1]).split(',')

    # Consume the trailing '?', if necessary.
    try:
        following, _ = next(input_iter)
    except StopIteration:
        following = None
    if following == '?':
        following = None
    return int(values[0]), following
+
def contains(source, inst):
    """
    Return True if "source" is an instance of "inst", or is a NonCapture
    that (recursively) holds one. False otherwise.
    """
    if isinstance(source, inst):
        return True
    # Only NonCapture containers are searched recursively.
    return (isinstance(source, NonCapture)
            and any(contains(elt, inst) for elt in source))
+
def flatten_result(source):
    """
    Turns the given source sequence into a list of reg-exp possibilities and
    their arguments. Returns a list of strings and a list of argument lists.
    Each of the two lists will be of the same length.
    """
    if source is None:
        # The "zero occurrences" branch of a Choice: one empty possibility.
        return [''], [[]]
    if isinstance(source, Group):
        # A Group holds ("%(name)s" string, param-name-or-None).
        if source[1] is None:
            params = []
        else:
            params = [source[1]]
        return [source[0]], [params]
    result = ['']
    result_args = [[]]
    pos = last = 0
    for pos, elt in enumerate(source):
        if isinstance(elt, six.string_types):
            continue
        # Flush the run of plain characters collected since the last
        # non-string element.
        piece = ''.join(source[last:pos])
        if isinstance(elt, Group):
            piece += elt[0]
            param = elt[1]
        else:
            param = None
        last = pos + 1
        for i in range(len(result)):
            result[i] += piece
            if param:
                result_args[i].append(param)
        if isinstance(elt, (Choice, NonCapture)):
            if isinstance(elt, NonCapture):
                # Treat a NonCapture as a single-alternative Choice.
                elt = [elt]
            inner_result, inner_args = [], []
            for item in elt:
                res, args = flatten_result(item)
                inner_result.extend(res)
                inner_args.extend(args)
            # Cartesian product of the possibilities so far with the inner
            # alternatives.
            new_result = []
            new_args = []
            for item, args in zip(result, result_args):
                for i_item, i_args in zip(inner_result, inner_args):
                    new_result.append(item + i_item)
                    new_args.append(args[:] + i_args)
            result = new_result
            result_args = new_args
    if pos >= last:
        # Trailing run of plain characters after the final non-string
        # element (or the whole sequence when it was all strings).
        piece = ''.join(source[last:])
        for i in range(len(result)):
            result[i] += piece
    return result, result_args
+
diff --git a/lib/python2.7/site-packages/django/utils/safestring.py b/lib/python2.7/site-packages/django/utils/safestring.py
new file mode 100644
index 0000000..07e0bf4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/safestring.py
@@ -0,0 +1,134 @@
+"""
+Functions for working with "safe strings": strings that can be displayed safely
+without further escaping in HTML. Marking something as a "safe string" means
+that the producer of the string has already turned characters that should not
+be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
+"""
+from django.utils.functional import curry, Promise
+from django.utils import six
+
class EscapeData(object):
    # Marker base class: instances still require HTML escaping on output.
    pass

class EscapeBytes(bytes, EscapeData):
    """
    A byte string that should be HTML-escaped when output.
    """
    pass

class EscapeText(six.text_type, EscapeData):
    """
    A unicode string object that should be HTML-escaped when output.
    """
    pass

# EscapeString follows the native str type of the running interpreter.
if six.PY3:
    EscapeString = EscapeText
else:
    EscapeString = EscapeBytes
    # backwards compatibility for Python 2
    EscapeUnicode = EscapeText
+
class SafeData(object):
    # Marker base class: instances are already safe for HTML output and need
    # no further escaping.
    pass
+
class SafeBytes(bytes, SafeData):
    """
    A bytes subclass that has been specifically marked as "safe" (requires no
    further escaping) for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe byte string with another safe byte string or safe
        unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeBytes, self).__add__(rhs)
        if isinstance(rhs, SafeText):
            return SafeText(t)
        elif isinstance(rhs, SafeBytes):
            return SafeBytes(t)
        return t

    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)

    # decode() would normally return plain text; proxy it so the result
    # keeps its "safe" marking.
    decode = curry(_proxy_method, method=bytes.decode)
+
class SafeText(six.text_type, SafeData):
    """
    A unicode (Python 2) / str (Python 3) subclass that has been specifically
    marked as "safe" for HTML output purposes.
    """
    def __add__(self, rhs):
        """
        Concatenating a safe unicode string with another safe byte string or
        safe unicode string is safe. Otherwise, the result is no longer safe.
        """
        t = super(SafeText, self).__add__(rhs)
        if isinstance(rhs, SafeData):
            return SafeText(t)
        return t

    def _proxy_method(self, *args, **kwargs):
        """
        Wrap a call to a normal unicode method up so that we return safe
        results. The method that is being wrapped is passed in the 'method'
        argument.
        """
        method = kwargs.pop('method')
        data = method(self, *args, **kwargs)
        if isinstance(data, bytes):
            return SafeBytes(data)
        else:
            return SafeText(data)

    # encode() would normally return plain bytes; proxy it so the result
    # keeps its "safe" marking.
    encode = curry(_proxy_method, method=six.text_type.encode)
+
# SafeString follows the native str type of the running interpreter.
if six.PY3:
    SafeString = SafeText
else:
    SafeString = SafeBytes
    # backwards compatibility for Python 2
    SafeUnicode = SafeText
+
def mark_safe(s):
    """
    Explicitly mark a string as safe for (HTML) output purposes. The returned
    object can be used everywhere a string or unicode object is appropriate.

    Can be called multiple times on a single string.
    """
    if isinstance(s, SafeData):
        # Already marked; calling again is a no-op.
        return s
    delegates_to_bytes = isinstance(s, bytes) or (
        isinstance(s, Promise) and s._delegate_bytes)
    if delegates_to_bytes:
        return SafeBytes(s)
    if isinstance(s, (six.text_type, Promise)):
        return SafeText(s)
    # Any other object: stringify, then wrap in the native safe str type.
    return SafeString(str(s))
+
def mark_for_escaping(s):
    """
    Explicitly mark a string as requiring HTML escaping upon output. Has no
    effect on SafeData subclasses.

    Can be called multiple times on a single string (the resulting escaping is
    only applied once).
    """
    if isinstance(s, (SafeData, EscapeData)):
        # Already safe or already marked; calling again is a no-op.
        return s
    delegates_to_bytes = isinstance(s, bytes) or (
        isinstance(s, Promise) and s._delegate_bytes)
    if delegates_to_bytes:
        return EscapeBytes(s)
    if isinstance(s, (six.text_type, Promise)):
        return EscapeText(s)
    # NOTE(review): unlike mark_safe's str(s) fallback, this coerces via
    # bytes(s), which behaves differently on Python 3 -- confirm intent.
    return EscapeBytes(bytes(s))
+
diff --git a/lib/python2.7/site-packages/django/utils/simplejson.py b/lib/python2.7/site-packages/django/utils/simplejson.py
new file mode 100644
index 0000000..d4032a6
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/simplejson.py
@@ -0,0 +1,31 @@
# Django 1.5 only supports Python >= 2.6, where the standard library includes
# the json module. Previous versions of Django shipped a copy for Python < 2.6.

# For backwards compatibility, we're keeping an importable json module
# at this location, with the same lookup sequence.

# Avoid shadowing the simplejson module
from __future__ import absolute_import

import warnings
warnings.warn("django.utils.simplejson is deprecated; use json instead.",
    DeprecationWarning, stacklevel=2)

try:
    import simplejson
except ImportError:
    use_simplejson = False
else:
    # The system-installed version has priority providing it is either not an
    # earlier version or it contains the C speedups.
    from json import __version__ as stdlib_json_version
    # NOTE(review): this compares '.'-split version strings lexicographically,
    # so e.g. '10' sorts before '9'; safe only while version components stay
    # single-digit -- confirm before relying on it for newer releases.
    use_simplejson = (hasattr(simplejson, '_speedups') or
        simplejson.__version__.split('.') >= stdlib_json_version.split('.'))

# Make sure we copy over the version. See #17071
if use_simplejson:
    from simplejson import *
    from simplejson import __version__
else:
    from json import *
    from json import __version__
diff --git a/lib/python2.7/site-packages/django/utils/six.py b/lib/python2.7/site-packages/django/utils/six.py
new file mode 100644
index 0000000..26370d7
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/six.py
@@ -0,0 +1,676 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2014 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.6.1"
+
+
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# Per-interpreter aliases for the string/integer/class families, so calling
# code can be version-agnostic.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            # len() coerces __len__'s result through Py_ssize_t; an
            # OverflowError here means ssize_t is 32-bit.
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
+
+
def _add_doc(func, doc):
    """Add documentation to a function (sets func.__doc__ in place)."""
    func.__doc__ = doc
+
+
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__ returns the top-level package, so look up the fully-dotted
    # module in sys.modules instead.
    __import__(name)
    return sys.modules[name]
+
+
class _LazyDescr(object):
    # Descriptor that resolves its target lazily on first access (via the
    # subclass-provided _resolve()), caches it on the instance, and then
    # removes itself from the class so it never runs again.

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        try:
            result = self._resolve()
        except ImportError:
            # See the nice big comment in MovedModule.__getattr__.
            raise AttributeError("%s could not be imported " % self.name)
        setattr(obj, self.name, result)  # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result
+
+
class MovedModule(_LazyDescr):
    # Lazy descriptor for a module that lives under different names on
    # Python 2 ("old") and Python 3 ("new", defaulting to "name").

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # It turns out many Python frameworks like to traverse sys.modules and
        # try to load various attributes. This causes problems if this is a
        # platform-specific module on the wrong platform, like _winreg on
        # Unixes. Therefore, we silently pretend unimportable modules do not
        # have any attributes. See issues #51, #53, #56, and #63 for the full
        # tales of woe.
        #
        # First, if possible, avoid loading the module just to look at __file__,
        # __name__, or __path__.
        if (attr in ("__file__", "__name__", "__path__") and
                self.mod not in sys.modules):
            raise AttributeError(attr)
        try:
            _module = self._resolve()
        except ImportError:
            raise AttributeError(attr)
        value = getattr(_module, attr)
        # Cache on the instance so later lookups bypass __getattr__.
        setattr(self, attr, value)
        return value
+
+
class _LazyModule(types.ModuleType):
    # Module stand-in whose public attributes are lazy descriptors listed in
    # the subclass's _moved_attributes.

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the lazily-provided names without resolving them.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
+
+
class MovedAttribute(_LazyDescr):
    # Lazy descriptor for an attribute that lives in different modules (and
    # possibly under different names) on Python 2 and Python 3.

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Fall back new_attr -> old_attr -> name when no explicit new
            # attribute name is given.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
+
+
+
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""


# Table of (name, Python 2 location, Python 3 location) relocations exposed
# through six.moves.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),

    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
        "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
        "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
        "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Install each moved item as a lazy descriptor on _MovedItems; moved modules
# are also registered directly in sys.modules under six.moves.*.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        sys.modules[__name__ + ".moves." + attr.name] = attr
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+
+
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""


# urlparse/urllib (Python 2) names consolidated into urllib.parse (Python 3).
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Registered under both the underscore and dotted names for compatibility.
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+
+
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""


# urllib2/urllib (Python 2) exceptions consolidated into urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

# Registered under both the underscore and dotted names for compatibility.
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+
+
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""


# Names that live in urllib.request on Python 3; on Python 2 they are
# split between urllib and urllib2 (old module name is the second argument).
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

# Register under both "six.moves.urllib_request" and "six.moves.urllib.request".
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+
+
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""


# Response helper classes: urllib on Python 2, urllib.response on Python 3.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

# Register under both "six.moves.urllib_response" and "six.moves.urllib.response".
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+
+
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


# robotparser was renamed to urllib.robotparser in Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

# Register under both "six.moves.urllib_robotparser" and "six.moves.urllib.robotparser".
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+
+
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # The submodules were placed into sys.modules by the sections above,
    # so they can simply be looked up here.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]

    def __dir__(self):
        # Only the five submodules are part of the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']


sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
+
+
def add_move(move):
    """Add an item to six.moves."""
    # `move` must expose a .name attribute (e.g. MovedAttribute/MovedModule);
    # it becomes an attribute of the lazy _MovedItems module class.
    setattr(_MovedItems, move.name, move)
+
+
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if no move with that name exists.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Not on the class -- it may have been resolved and cached in the
        # moves module's own namespace instead.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
+
+
# Introspection attribute names that were renamed between Python 2 and
# Python 3; the get_* accessors and iter* helpers below are built on these.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"
+
+
# Use the builtin next() where available; otherwise (very old Pythons)
# fall back to calling the iterator's next() method directly.
try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
+
+
# callable() is missing on some Python 3 versions; emulate it by checking
# for __call__ anywhere in the type's MRO.
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions pass through.
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        # Python 2 MethodType takes an explicit class argument.
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):
        # Base class that maps the Python 3 __next__ protocol onto the
        # Python 2 next() protocol.

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
+
+
# Version-neutral accessors built from the attribute names selected above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
+
+
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    # _iterkeys is "iterkeys" on Python 2 and "keys" on Python 3.
    return iter(getattr(d, _iterkeys)(**kw))

def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))

def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))

def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    # Requires a mapping with a lists()/iterlists() method (plain dicts
    # have none) -- presumably Django's MultiValueDict; verify at callers.
    return iter(getattr(d, _iterlists)(**kw))
+
+
# Byte/text literal helpers and byte-indexing shims that behave uniformly
# on Python 2 (str is bytes) and Python 3 (str is text).
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        # bytes((i,)) works everywhere on 3.x but is slower.
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    # On Python 2, indexing bytes yields 1-char strings, so go through ord().
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
+
+
if PY3:
    # exec is an ordinary builtin function on Python 3; fetching it via the
    # builtins module keeps this file parseable on Python 2.
    exec_ = getattr(moves.builtins, "exec")


    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals (and locals).
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        # The statement form "exec ... in ..." is a SyntaxError on Python 3,
        # so it is itself hidden inside an exec'd string.
        exec("""exec _code_ in _globs_, _locs_""")


    # Three-argument raise is Python-2-only syntax; compile it from a string
    # so this module still imports on Python 3.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
+
+
# Use the real print function where the builtins module provides one;
# otherwise (Python 2.4/2.5) emulate it.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        # If sep, end or any positional argument is unicode, the whole
        # output is promoted to unicode.
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)

_add_doc(reraise, """Reraise an exception.""")
+
+
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class named "NewBase" built by *meta* from the
    given *bases*; inheriting from it gives a 2/3-compatible metaclass.
    """
    name = "NewBase"
    return meta(name, bases, {})
+
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class via *metaclass*, carrying over its
    namespace (minus the implicit __dict__/__weakref__ descriptors and
    any slot descriptors, which the rebuild regenerates).
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        for implicit in ('__dict__', '__weakref__'):
            body.pop(implicit, None)
        slots = body.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slot_name in slots:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
+
+
+### Additional customizations for Django ###
+
# Names of the regex-assertion unittest methods, which were renamed
# between Python 2 and 3; used by the dispatch helpers below.
if PY3:
    _assertRaisesRegex = "assertRaisesRegex"
    _assertRegex = "assertRegex"
    memoryview = memoryview
else:
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
    # memoryview and buffer are not strictly equivalent, but should be fine for
    # django core usage (mainly BinaryField). However, Jython doesn't support
    # buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
    if sys.platform.startswith('java'):
        memoryview = memoryview
    else:
        memoryview = buffer
+
+
def assertRaisesRegex(self, *args, **kwargs):
    # Dispatch to assertRaisesRegex (PY3) / assertRaisesRegexp (PY2).
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    # Dispatch to assertRegex (PY3) / assertRegexpMatches (PY2).
    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+add_move(MovedModule("_dummy_thread", "dummy_thread"))
+add_move(MovedModule("_thread", "thread"))
diff --git a/lib/python2.7/site-packages/django/utils/synch.py b/lib/python2.7/site-packages/django/utils/synch.py
new file mode 100644
index 0000000..4859907
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/synch.py
@@ -0,0 +1,93 @@
+"""
+Synchronization primitives:
+
+ - reader-writer lock (preference to writers)
+
+(Contributed to Django by eugene@lazutkin.com)
+"""
+
+import contextlib
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+
class RWLock(object):
    """
    Classic implementation of reader-writer lock with preference to writers.

    Readers can access a resource simultaneously.
    Writers get an exclusive access.

    API is self-descriptive:
        reader_enters()
        reader_leaves()
        writer_enters()
        writer_leaves()
    """
    def __init__(self):
        # mutex guards all the counters below so each method's bookkeeping
        # runs atomically.
        self.mutex = threading.RLock()
        # Both semaphores start at 0: acquire() blocks until some thread
        # grants a slot with release().
        self.can_read = threading.Semaphore(0)
        self.can_write = threading.Semaphore(0)
        self.active_readers = 0
        self.active_writers = 0
        self.waiting_readers = 0
        self.waiting_writers = 0

    def reader_enters(self):
        with self.mutex:
            # A reader may proceed only when no writer is active or waiting
            # (this is the "preference to writers"); otherwise it queues.
            if self.active_writers == 0 and self.waiting_writers == 0:
                self.active_readers += 1
                self.can_read.release()
            else:
                self.waiting_readers += 1
        # Blocks until a release() above (or in writer_leaves) grants entry.
        self.can_read.acquire()

    def reader_leaves(self):
        with self.mutex:
            self.active_readers -= 1
            # The last reader out hands the lock to one waiting writer.
            if self.active_readers == 0 and self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()

    @contextlib.contextmanager
    def reader(self):
        # Convenience wrapper: "with lock.reader(): ..."
        self.reader_enters()
        try:
            yield
        finally:
            self.reader_leaves()

    def writer_enters(self):
        with self.mutex:
            # A writer may start only when the lock is completely idle.
            if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
                self.active_writers += 1
                self.can_write.release()
            else:
                self.waiting_writers += 1
        self.can_write.acquire()

    def writer_leaves(self):
        with self.mutex:
            self.active_writers -= 1
            # Prefer handing the lock to the next waiting writer...
            if self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()
            # ...otherwise wake every queued reader at once.
            elif self.waiting_readers != 0:
                t = self.waiting_readers
                self.waiting_readers = 0
                self.active_readers += t
                while t > 0:
                    self.can_read.release()
                    t -= 1

    @contextlib.contextmanager
    def writer(self):
        # Convenience wrapper: "with lock.writer(): ..."
        self.writer_enters()
        try:
            yield
        finally:
            self.writer_leaves()
diff --git a/lib/python2.7/site-packages/django/utils/termcolors.py b/lib/python2.7/site-packages/django/utils/termcolors.py
new file mode 100644
index 0000000..bb14837
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/termcolors.py
@@ -0,0 +1,200 @@
+"""
+termcolors.py
+"""
+
+from django.utils import six
+
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# ANSI SGR codes: foreground colors are 30-37, background colors are 40-47.
foreground = {name: '3%s' % index for index, name in enumerate(color_names)}
background = {name: '4%s' % index for index, name in enumerate(color_names)}

RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}

def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    # Special case: colorize('', opts=('reset',)) emits a bare RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # Color codes first (kwargs.items() works identically on Python 2 and 3
    # here; the six.iteritems indirection was unnecessary)...
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    # ...then display-option codes; unknown options are silently ignored.
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')

def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        return colorize(text, opts, **kwargs)
    return style_func
+
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# Maps each palette name to {ROLE: colorize() keyword arguments}.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
    },
    DARK_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'yellow' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green' },
        'HTTP_NOT_MODIFIED': { 'fg': 'cyan' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'yellow' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    },
    LIGHT_PALETTE: {
        'ERROR': { 'fg': 'red', 'opts': ('bold',) },
        'NOTICE': { 'fg': 'red' },
        'SQL_FIELD': { 'fg': 'green', 'opts': ('bold',) },
        'SQL_COLTYPE': { 'fg': 'green' },
        'SQL_KEYWORD': { 'fg': 'blue' },
        'SQL_TABLE': { 'opts': ('bold',) },
        'HTTP_INFO': { 'opts': ('bold',) },
        'HTTP_SUCCESS': { },
        'HTTP_REDIRECT': { 'fg': 'green', 'opts': ('bold',) },
        'HTTP_NOT_MODIFIED': { 'fg': 'green' },
        'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
        'HTTP_NOT_FOUND': { 'fg': 'red' },
        'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
    }
}
DEFAULT_PALETTE = DARK_PALETTE

def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_not_modified',
        'http_bad_request', 'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
diff --git a/lib/python2.7/site-packages/django/utils/text.py b/lib/python2.7/site-packages/django/utils/text.py
new file mode 100644
index 0000000..92eda53
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/text.py
@@ -0,0 +1,412 @@
+from __future__ import unicode_literals
+
+import re
+import unicodedata
+from gzip import GzipFile
+from io import BytesIO
+
+from django.utils.encoding import force_text
+from django.utils.functional import allow_lazy, SimpleLazyObject
+from django.utils import six
+from django.utils.six.moves import html_entities
+from django.utils.translation import ugettext_lazy, ugettext as _, pgettext
+from django.utils.safestring import mark_safe
+
+if six.PY2:
+ # Import force_unicode even though this module doesn't use it, because some
+ # people rely on it being here.
+ from django.utils.encoding import force_unicode
+
# Capitalizes the first letter of a string.
def capfirst(x):
    """Capitalize the first letter of *x*; falsy values pass through unchanged."""
    # A def instead of an assigned lambda (PEP 8 E731); behavior is identical.
    return x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = allow_lazy(capfirst, six.text_type)
+
# Set up regular expressions
# re_words: matches an entity, a tag, or (group 1) a plain word.
# re_tag: splits a tag into (closing slash, name, self-closing slash).
re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U|re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
+
+
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_text(text)
    def _generator():
        it = iter(text.split(' '))
        word = next(it)
        yield word
        # pos tracks the length of the current output line; a word that
        # contains a newline resets the count from its final segment.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            pos += len(lines[0]) + 1
            if pos > width:
                # Emit a line break instead of the separating space.
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    pos = len(lines[-1])
            yield word
    return ''.join(_generator())
wrap = allow_lazy(wrap, six.text_type)
+
+
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.
    """
    def __init__(self, text):
        # Defer force_text() until the value is actually needed.
        super(Truncator, self).__init__(lambda: force_text(text))

    def add_truncation_text(self, text, truncate=None):
        # Append the truncation marker (default: a translatable "...") to
        # text, honouring an optional %(truncated_text)s placeholder.
        if truncate is None:
            truncate = pgettext(
                'String to return when truncating text',
                '%(truncated_text)s...')
        truncate = force_text(truncate)
        if '%(truncated_text)s' in truncate:
            return truncate % {'truncated_text': text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return '%s%s' % (text, truncate)

    def chars(self, num, truncate=None):
        """
        Returns the text truncated to be no longer than the specified number
        of characters.

        Takes an optional argument of what should be used to notify that the
        string has been truncated, defaulting to a translatable string of an
        ellipsis (...).
        """
        length = int(num)
        # NFC-compose so combining marks do not inflate the character count.
        text = unicodedata.normalize('NFC', self._wrapped)

        # Calculate the length to truncate to (max length - end_text length)
        truncate_len = length
        for char in self.add_truncation_text('', truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break

        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                end_index = i
            if s_len > length:
                # Return the truncated string
                return self.add_truncation_text(text[:end_index or 0],
                                                truncate)

        # Return the original string since no truncation was necessary
        return text
    chars = allow_lazy(chars)

    def words(self, num, truncate=None, html=False):
        """
        Truncates a string after a certain number of words. Takes an optional
        argument of what should be used to notify that the string has been
        truncated, defaulting to ellipsis (...).
        """
        length = int(num)
        if html:
            return self._html_words(length, truncate)
        return self._text_words(length, truncate)
    words = allow_lazy(words)

    def _text_words(self, length, truncate):
        """
        Truncates a string after a certain number of words.

        Newlines in the string will be stripped.
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(' '.join(words), truncate)
        return ' '.join(words)

    def _html_words(self, length, truncate):
        """
        Truncates HTML to a certain number of words (not counting tags and
        comments). Closes opened tags if they were correctly closed in the
        given HTML.

        Newlines in the HTML are preserved.
        """
        if length <= 0:
            return ''
        html4_singlets = (
            'br', 'col', 'link', 'base', 'img',
            'param', 'area', 'hr', 'input'
        )
        # Count non-HTML words and keep note of open tags
        pos = 0
        end_text_pos = 0
        words = 0
        open_tags = []
        while words <= length:
            m = re_words.search(self._wrapped, pos)
            if not m:
                # Checked through whole string
                break
            pos = m.end(0)
            if m.group(1):
                # It's an actual non-HTML word
                words += 1
                if words == length:
                    end_text_pos = pos
                continue
            # Check for tag
            tag = re_tag.match(m.group(0))
            if not tag or end_text_pos:
                # Don't worry about non tags or tags after our truncate point
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags
                    open_tags = open_tags[i + 1:]
            else:
                # Add it to the start of the open tags list
                open_tags.insert(0, tagname)
        if words <= length:
            # Don't try to close tags if we don't need to truncate
            return self._wrapped
        out = self._wrapped[:end_text_pos]
        truncate_text = self.add_truncation_text('', truncate)
        if truncate_text:
            out += truncate_text
        # Close any tags still open
        for tag in open_tags:
            out += '</%s>' % tag
        # Return string
        return out
+
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and anything that is not a unicode
    alphanumeric, dash, underscore, or dot, is removed.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    'johns_portrait_in_2004.jpg'
    """
    s = force_text(s).strip().replace(' ', '_')
    # (?u) makes \w match unicode word characters.
    return re.sub(r'(?u)[^-\w.]', '', s)
get_valid_filename = allow_lazy(get_valid_filename, six.text_type)
+
def get_text_list(list_, last_word=ugettext_lazy('or')):
    """
    Join a list of strings into a human-readable enumeration, using
    *last_word* before the final item.

    >>> get_text_list(['a', 'b', 'c', 'd'])
    'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    'a and b'
    >>> get_text_list(['a'])
    'a'
    >>> get_text_list([])
    ''
    """
    # Idiomatic emptiness test and one statement per line (PEP 8 E701).
    if not list_:
        return ''
    if len(list_) == 1:
        return force_text(list_[0])
    return '%s %s %s' % (
        # Translators: This string is used as a separator between list elements
        _(', ').join([force_text(i) for i in list_][:-1]),
        force_text(last_word), force_text(list_[-1]))
get_text_list = allow_lazy(get_text_list, six.text_type)
+
def normalize_newlines(text):
    # Collapse Windows (\r\n) and old-Mac (\r) line endings to \n.
    return force_text(re.sub(r'\r\n|\r|\n', '\n', text))
normalize_newlines = allow_lazy(normalize_newlines, six.text_type)
+
def recapitalize(text):
    "Recapitalizes text, placing caps after end-of-sentence punctuation."
    text = force_text(text).lower()
    # A letter at the start of the string or following ". ", "? " or "! ".
    capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
    text = capsRE.sub(lambda x: x.group(1).upper(), text)
    return text
recapitalize = allow_lazy(recapitalize)
+
def phone2numeric(phone):
    "Converts a phone number with letters into its numeric equivalent."
    # Standard telephone keypad layout: abc=2, def=3, ..., wxyz=9.
    letters = 'abcdefghijklmnopqrstuvwxyz'
    digits = '22233344455566677778889999'
    char2number = dict(zip(letters, digits))
    # Characters without a keypad mapping (digits, punctuation) pass through.
    return ''.join(char2number.get(c, c) for c in phone.lower())
phone2numeric = allow_lazy(phone2numeric)
+
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    """Return the bytestring *s* gzip-compressed at compression level 6."""
    buf = BytesIO()
    # GzipFile supports the context-manager protocol since Python 2.7;
    # closing it flushes the gzip trailer into the buffer.
    with GzipFile(mode='wb', compresslevel=6, fileobj=buf) as zfile:
        zfile.write(s)
    return buf.getvalue()
+
class StreamingBuffer(object):
    """Minimal file-like sink whose read() drains everything written so far.

    Used as the target of a GzipFile so compressed output can be pulled
    out incrementally (see compress_sequence).
    """

    def __init__(self):
        # Accumulates written chunks until the next read().
        self.vals = []

    def write(self, val):
        """Buffer one chunk."""
        self.vals.append(val)

    def read(self):
        """Return all buffered bytes and reset the buffer."""
        data = b''.join(self.vals)
        self.vals = []
        return data

    def flush(self):
        """No-op; satisfies the file protocol."""
        return

    def close(self):
        """No-op; satisfies the file protocol."""
        return
+
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
    """Generator that gzip-compresses each item of *sequence*, yielding the
    compressed output incrementally (first the gzip header, then one chunk
    per input item, then the trailer)."""
    buf = StreamingBuffer()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=buf)
    # Output headers...
    yield buf.read()
    for item in sequence:
        zfile.write(item)
        # flush() pushes the compressed bytes for this item into buf now.
        zfile.flush()
        yield buf.read()
    zfile.close()
    # Drain the gzip trailer written by close().
    yield buf.read()
+
# Matches any non-ASCII character (the file uses unicode_literals, so the
# \u escapes denote the actual code points).
ustring_re = re.compile("([\u0080-\uffff])")

def javascript_quote(s, quote_double_quotes=False):
    """Escape *s* for embedding in a JavaScript string literal: backslashes,
    CR/LF/TAB and single quotes are backslash-escaped, non-ASCII characters
    become \\uXXXX escapes, and double quotes optionally become &quot;."""

    def fix(match):
        return "\\u%04x" % ord(match.group(1))

    if type(s) == bytes:
        s = s.decode('utf-8')
    elif type(s) != six.text_type:
        raise TypeError(s)
    # Escape the backslash first so the escapes added below aren't doubled.
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, six.text_type)
+
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""", re.VERBOSE)

def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).

    >>> list(smart_split(r'This is "a person\'s" test.'))
    ['This', 'is', '"a person\\\'s"', 'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    ['Another', "'person\\'s'", 'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    ['A', '"\\"funky\\" style"', 'test.']
    """
    text = force_text(text)
    # Each regex match is one token; whitespace between matches is dropped.
    for bit in smart_split_re.finditer(text):
        yield bit.group(0)
+
def _replace_entity(match):
    # Resolve one matched HTML entity to its unicode character; on any
    # lookup/parse failure the original entity text is returned unchanged.
    text = match.group(1)
    if text[0] == '#':
        # Numeric character reference, decimal (&#65;) or hex (&#x41;).
        text = text[1:]
        try:
            if text[0] in 'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return six.unichr(c)
        except ValueError:
            return match.group(0)
    else:
        # Named entity, e.g. &amp; -- looked up in the HTML entity table.
        try:
            return six.unichr(html_entities.name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)

_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")

def unescape_entities(text):
    """Replace HTML entities in *text* with their unicode characters."""
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, six.text_type)
+
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::

        >>> unescape_string_literal('"abc"')
        'abc'
        >>> unescape_string_literal("'abc'")
        'abc'
        >>> unescape_string_literal('"a \"bc\""')
        'a "bc"'
        >>> unescape_string_literal("'\'ab\' c'")
        "'ab' c"
    """
    quote = s[0]
    if quote not in "\"'" or s[-1] != quote:
        raise ValueError("Not a string literal: %r" % s)
    body = s[1:-1]
    # Unescape the quote character first, then collapse doubled backslashes.
    return body.replace('\\' + quote, quote).replace('\\\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
+
def slugify(value):
    """
    Converts to lowercase, removes non-word characters (alphanumerics and
    underscores) and converts spaces to hyphens. Also strips leading and
    trailing whitespace.
    """
    # Transliterate to ASCII, dropping anything that can't be represented.
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    # Raw strings for the regex patterns so \w and \s are regex escapes,
    # not (deprecated) string-literal escapes.
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value))
slugify = allow_lazy(slugify, six.text_type)
diff --git a/lib/python2.7/site-packages/django/utils/timesince.py b/lib/python2.7/site-packages/django/utils/timesince.py
new file mode 100644
index 0000000..8fb0f64
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/timesince.py
@@ -0,0 +1,64 @@
+from __future__ import unicode_literals
+
+import datetime
+
+from django.utils.html import avoid_wrapping
+from django.utils.timezone import is_aware, utc
+from django.utils.translation import ugettext, ungettext_lazy
+
def timesince(d, now=None, reversed=False):
    """
    Takes two datetime objects and returns the time between d and now
    as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
    then "0 minutes" is returned.

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to two adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    Adapted from
    http://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    # (seconds per unit, lazy plural format), largest unit first.
    chunks = (
        (60 * 60 * 24 * 365, ungettext_lazy('%d year', '%d years')),
        (60 * 60 * 24 * 30, ungettext_lazy('%d month', '%d months')),
        (60 * 60 * 24 * 7, ungettext_lazy('%d week', '%d weeks')),
        (60 * 60 * 24, ungettext_lazy('%d day', '%d days')),
        (60 * 60, ungettext_lazy('%d hour', '%d hours')),
        (60, ungettext_lazy('%d minute', '%d minutes'))
    )
    # Promote plain dates to datetimes so the subtraction below is defined.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    if not now:
        # Match d's awareness so aware and naive values never mix.
        now = datetime.datetime.now(utc if is_aware(d) else None)

    delta = (d - now) if reversed else (now - d)
    # Microseconds are deliberately dropped.
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(ugettext('0 minutes'))

    # Locate the largest unit with a non-zero count.
    for index, (unit_seconds, unit_name) in enumerate(chunks):
        count = since // unit_seconds
        if count:
            break
    result = avoid_wrapping(unit_name % count)
    if index + 1 < len(chunks):
        # Append the adjacent smaller unit when it is non-zero.
        next_seconds, next_name = chunks[index + 1]
        next_count = (since - unit_seconds * count) // next_seconds
        if next_count:
            result += ugettext(', ') + avoid_wrapping(next_name % next_count)
    return result
+
def timeuntil(d, now=None):
    """
    Like timesince, but returns a string measuring the time until
    the given time.

    ``now`` defaults (inside timesince) to the current time; the delta is
    simply computed in the reversed direction.
    """
    return timesince(d, now, reversed=True)
diff --git a/lib/python2.7/site-packages/django/utils/timezone.py b/lib/python2.7/site-packages/django/utils/timezone.py
new file mode 100644
index 0000000..73749cf
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/timezone.py
@@ -0,0 +1,317 @@
+"""
+Timezone-related classes and functions.
+
+This module uses pytz when it's available and fallbacks when it isn't.
+"""
+
+from datetime import datetime, timedelta, tzinfo
+from threading import local
+import sys
+import time as _time
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+from django.conf import settings
+from django.utils import six
+
+__all__ = [
+ 'utc',
+ 'get_default_timezone', 'get_default_timezone_name',
+ 'get_current_timezone', 'get_current_timezone_name',
+ 'activate', 'deactivate', 'override',
+ 'localtime', 'now',
+ 'is_aware', 'is_naive', 'make_aware', 'make_naive',
+]
+
+
+# UTC and local time zones
+
+ZERO = timedelta(0)
+
class UTC(tzinfo):
    """
    UTC implementation taken from Python's docs.

    Used only when pytz isn't available.
    """

    def __repr__(self):
        return "<UTC>"

    def utcoffset(self, dt):
        # UTC is always zero offset.
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return ZERO
+
class ReferenceLocalTimezone(tzinfo):
    """
    Local time implementation taken from Python's docs.

    Used only when pytz isn't available, and most likely inaccurate. If you're
    having trouble with this class, don't waste your time, just install pytz.

    Kept identical to the reference version. Subclasses contain improvements.
    """

    def __init__(self):
        # This code is moved in __init__ to execute it as late as possible
        # See get_default_timezone().
        self.STDOFFSET = timedelta(seconds=-_time.timezone)
        if _time.daylight:
            self.DSTOFFSET = timedelta(seconds=-_time.altzone)
        else:
            self.DSTOFFSET = self.STDOFFSET
        self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
        tzinfo.__init__(self)

    def __repr__(self):
        return "<LocalTimezone>"

    def utcoffset(self, dt):
        # Offset from UTC depends on whether dt falls within DST.
        if self._isdst(dt):
            return self.DSTOFFSET
        else:
            return self.STDOFFSET

    def dst(self, dt):
        # The STD/DST delta while DST is active, otherwise zero.
        if self._isdst(dt):
            return self.DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        is_dst = False if dt is None else self._isdst(dt)
        return _time.tzname[is_dst]

    def _isdst(self, dt):
        # Round-trip dt through mktime()/localtime() so the C library decides
        # whether DST applies. Fails outside the epoch range (see subclass).
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0
+
class LocalTimezone(ReferenceLocalTimezone):
    """
    Slightly improved local time implementation focusing on correctness.

    It still crashes on dates before 1970 or after 2038, but at least the
    error message is helpful.
    """

    def _isdst(self, dt):
        try:
            return super(LocalTimezone, self)._isdst(dt)
        except (OverflowError, ValueError) as exc:
            # Re-raise the same exception type with an actionable message,
            # chaining the original so the traceback keeps the real cause.
            exc_type = type(exc)
            exc_value = exc_type(
                "Unsupported value: %r. You should install pytz." % dt)
            exc_value.__cause__ = exc
            six.reraise(exc_type, exc_value, sys.exc_info()[2])
+
+utc = pytz.utc if pytz else UTC()
+"""UTC time zone as a tzinfo instance."""
+
+# In order to avoid accessing the settings at compile time,
+# wrap the expression in a function and cache the result.
+_localtime = None
+
def get_default_timezone():
    """
    Returns the default time zone as a tzinfo instance.

    This is the time zone defined by settings.TIME_ZONE.

    See also :func:`get_current_timezone`.
    """
    global _localtime
    if _localtime is None:
        tz = settings.TIME_ZONE
        if pytz is not None and isinstance(tz, six.string_types):
            _localtime = pytz.timezone(tz)
        else:
            # This relies on os.environ['TZ'] being set to settings.TIME_ZONE.
            _localtime = LocalTimezone()
    return _localtime
+
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
    """
    Returns the name of the default time zone (i.e. the zone derived from
    settings.TIME_ZONE by get_default_timezone()).
    """
    return _get_timezone_name(get_default_timezone())
+
+_active = local()
+
def get_current_timezone():
    """
    Returns the currently active time zone as a tzinfo instance.

    Falls back to the default time zone when no thread-local override is
    active. Note the getattr default is evaluated eagerly on every call.
    """
    return getattr(_active, "value", get_default_timezone())
+
def get_current_timezone_name():
    """
    Returns the name of the currently active time zone
    (see _get_timezone_name() for the lookup rules).
    """
    return _get_timezone_name(get_current_timezone())
+
def _get_timezone_name(timezone):
    """
    Returns the name of ``timezone``: the ``zone`` attribute for pytz time
    zones, otherwise the value of ``tzname(None)``.
    """
    try:
        # for pytz timezones
        return timezone.zone
    except AttributeError:
        # for regular tzinfo objects
        return timezone.tzname(None)
+
+# Timezone selection functions.
+
+# These functions don't change os.environ['TZ'] and call time.tzset()
+# because it isn't thread safe.
+
def activate(timezone):
    """
    Sets the time zone for the current thread.

    The ``timezone`` argument must be an instance of a tzinfo subclass or a
    time zone name. If it is a time zone name, pytz is required.
    """
    if isinstance(timezone, tzinfo):
        _active.value = timezone
        return
    if pytz is not None and isinstance(timezone, six.string_types):
        _active.value = pytz.timezone(timezone)
        return
    raise ValueError("Invalid timezone: %r" % timezone)
+
def deactivate():
    """
    Unsets the time zone for the current thread.

    Django will then use the time zone defined by settings.TIME_ZONE.
    """
    # Only delete when an override is actually set; otherwise a no-op.
    if hasattr(_active, "value"):
        del _active.value
+
class override(object):
    """
    Temporarily set the time zone for the current thread.

    This is a context manager that uses ``~django.utils.timezone.activate()``
    to set the timezone on entry, and restores the previously active timezone
    on exit.

    The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
    If it is ``None``, Django enables the default time zone.
    """
    def __init__(self, timezone):
        self.timezone = timezone
        # Snapshot the current override (if any) so __exit__ can restore it.
        self.old_timezone = getattr(_active, 'value', None)

    def __enter__(self):
        if self.timezone is not None:
            activate(self.timezone)
        else:
            deactivate()

    def __exit__(self, exc_type, exc_value, traceback):
        if self.old_timezone is not None:
            _active.value = self.old_timezone
        else:
            deactivate()
+
+
+# Templates
+
def template_localtime(value, use_tz=None):
    """
    Checks if value is a datetime and converts it to local time if necessary.

    If use_tz is provided and is not None, that will force the value to
    be converted (or not), overriding the value of settings.USE_TZ.

    This function is designed for use by the template engine.
    """
    if not isinstance(value, datetime):
        return value
    convert = settings.USE_TZ if use_tz is None else use_tz
    # An object may opt out of conversion via convert_to_local_time = False.
    if convert and not is_naive(value) and getattr(value, 'convert_to_local_time', True):
        return localtime(value)
    return value
+
+
+# Utilities
+
def localtime(value, timezone=None):
    """
    Converts an aware datetime.datetime to local time.

    Local time is defined by the current time zone, unless another time zone
    is specified.
    """
    tz = timezone if timezone is not None else get_current_timezone()
    value = value.astimezone(tz)
    # pytz time zones expose normalize() to fix up DST after astimezone().
    normalize = getattr(tz, 'normalize', None)
    if normalize is not None:
        value = normalize(value)
    return value
+
def now():
    """
    Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
    """
    if not settings.USE_TZ:
        return datetime.now()
    # timeit showed datetime.now(tz=utc) to be ~24% slower than this form.
    return datetime.utcnow().replace(tzinfo=utc)
+
+# By design, these four functions don't perform any checks on their arguments.
+# The caller should ensure that they don't receive an invalid value like None.
+
def is_aware(value):
    """
    Determines if a given datetime.datetime is aware.

    The logic is described in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    tz = value.tzinfo
    return tz is not None and tz.utcoffset(value) is not None
+
def is_naive(value):
    """
    Determines if a given datetime.datetime is naive.

    The logic is described in Python's docs:
    http://docs.python.org/library/datetime.html#datetime.tzinfo
    """
    tz = value.tzinfo
    return tz is None or tz.utcoffset(value) is None
+
def make_aware(value, timezone):
    """
    Makes a naive datetime.datetime in a given time zone aware.
    """
    localize = getattr(timezone, 'localize', None)
    if localize is not None:
        # pytz: localize() handles DST correctly (is_dst=None raises on
        # ambiguous or non-existent times rather than guessing).
        return localize(value, is_dst=None)
    # Plain tzinfo objects: may be wrong around DST changes.
    return value.replace(tzinfo=timezone)
+
def make_naive(value, timezone):
    """
    Makes an aware datetime.datetime naive in a given time zone.
    """
    value = value.astimezone(timezone)
    # pytz time zones need a normalize() fix-up after astimezone().
    normalize = getattr(timezone, 'normalize', None)
    if normalize is not None:
        value = normalize(value)
    return value.replace(tzinfo=None)
diff --git a/lib/python2.7/site-packages/django/utils/translation/__init__.py b/lib/python2.7/site-packages/django/utils/translation/__init__.py
new file mode 100644
index 0000000..10a6cd6
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/translation/__init__.py
@@ -0,0 +1,196 @@
+"""
+Internationalization support.
+"""
+from __future__ import unicode_literals
+
+from django.utils.encoding import force_text
+from django.utils.functional import lazy
+from django.utils import six
+
+
+__all__ = [
+ 'activate', 'deactivate', 'override', 'deactivate_all',
+ 'get_language', 'get_language_from_request',
+ 'get_language_info', 'get_language_bidi',
+ 'check_for_language', 'to_locale', 'templatize', 'string_concat',
+ 'gettext', 'gettext_lazy', 'gettext_noop',
+ 'ugettext', 'ugettext_lazy', 'ugettext_noop',
+ 'ngettext', 'ngettext_lazy',
+ 'ungettext', 'ungettext_lazy',
+ 'pgettext', 'pgettext_lazy',
+ 'npgettext', 'npgettext_lazy',
+]
+
+
class TranslatorCommentWarning(SyntaxWarning):
    """Warning category used by the translation machinery for problems
    with translator comments."""
    pass
+
+
+# Here be dragons, so a short explanation of the logic won't hurt:
+# We are trying to solve two problems: (1) access settings, in particular
+# settings.USE_I18N, as late as possible, so that modules can be imported
+# without having to first configure Django, and (2) if some other code creates
+# a reference to one of these functions, don't break that reference when we
+# replace the functions with their real counterparts (once we do access the
+# settings).
+
class Trans(object):
    """
    The purpose of this class is to store the actual translation function upon
    receiving the first call to that function. After this is done, changes to
    USE_I18N will have no effect to which function is served upon request. If
    your tests rely on changing USE_I18N, you can delete all the functions
    from _trans.__dict__.

    Note that storing the function with setattr will have a noticeable
    performance effect, as access to the function goes the normal path,
    instead of using __getattr__.
    """

    def __getattr__(self, real_name):
        from django.conf import settings
        # Choose the real or no-op backend based on USE_I18N, then cache the
        # resolved attribute on the instance so later lookups bypass
        # __getattr__ entirely.
        if settings.USE_I18N:
            from django.utils.translation import trans_real as trans
        else:
            from django.utils.translation import trans_null as trans
        setattr(self, real_name, getattr(trans, real_name))
        return getattr(trans, real_name)
+
+_trans = Trans()
+
+# The Trans class is no more needed, so remove it from the namespace.
+del Trans
+
def gettext_noop(message):
    """Mark ``message`` for translation without translating it now."""
    return _trans.gettext_noop(message)

ugettext_noop = gettext_noop

def gettext(message):
    """Translate ``message`` via the active backend."""
    return _trans.gettext(message)

def ngettext(singular, plural, number):
    """Translate with plural selection based on ``number``."""
    return _trans.ngettext(singular, plural, number)

def ugettext(message):
    """Translate ``message``, returning unicode."""
    return _trans.ugettext(message)

def ungettext(singular, plural, number):
    """Unicode plural translation."""
    return _trans.ungettext(singular, plural, number)

def pgettext(context, message):
    """Translate ``message`` disambiguated by ``context``."""
    return _trans.pgettext(context, message)

def npgettext(context, singular, plural, number):
    """Plural translation disambiguated by ``context``."""
    return _trans.npgettext(context, singular, plural, number)

# Lazy variants: evaluation is deferred until the proxy is used as a string.
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
+
def lazy_number(func, resultclass, number=None, **kwargs):
    """
    Build a lazy proxy around a plural-aware translation function.

    If ``number`` is a literal int the plural form can be fixed immediately;
    otherwise ``number`` names the dict key (or is ignored for non-dict
    operands) whose runtime value selects singular vs. plural when the proxy
    is %-interpolated.
    """
    if isinstance(number, int):
        kwargs['number'] = number
        proxy = lazy(func, resultclass)(**kwargs)
    else:
        class NumberAwareString(resultclass):
            def __mod__(self, rhs):
                if isinstance(rhs, dict) and number:
                    try:
                        number_value = rhs[number]
                    except KeyError:
                        raise KeyError('Your dictionary lacks key \'%s\'. '
                            'Please provide it, because it is required to '
                            'determine whether string is singular or plural.'
                            % number)
                else:
                    number_value = rhs
                # NOTE(review): kwargs is shared by every __mod__ call on this
                # proxy; each interpolation overwrites 'number' — confirm this
                # is never used concurrently.
                kwargs['number'] = number_value
                translated = func(**kwargs)
                try:
                    translated = translated % rhs
                except TypeError:
                    # String doesn't contain a placeholder for the number
                    pass
                return translated

        proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
    return proxy
+
def ngettext_lazy(singular, plural, number=None):
    """Lazy bytestring-based plural translation (see lazy_number())."""
    return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)

def ungettext_lazy(singular, plural, number=None):
    """Lazy unicode plural translation (see lazy_number())."""
    return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)

def npgettext_lazy(context, singular, plural, number=None):
    """Lazy unicode plural translation with context (see lazy_number())."""
    return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
+
def activate(language):
    """Install the translation catalog for ``language`` in this thread."""
    return _trans.activate(language)

def deactivate():
    """Remove the thread's active translation, restoring the default."""
    return _trans.deactivate()
+
class override(object):
    """
    Context manager that temporarily activates ``language`` for the current
    thread.

    On exit the previously active language is restored, unless ``deactivate``
    is True, in which case the translation is simply deactivated.
    """
    def __init__(self, language, deactivate=False):
        self.language = language
        self.deactivate = deactivate
        # Remember what was active so __exit__ can restore it.
        self.old_language = get_language()

    def __enter__(self):
        if self.language is None:
            deactivate_all()
        else:
            activate(self.language)

    def __exit__(self, exc_type, exc_value, traceback):
        if not self.deactivate:
            activate(self.old_language)
        else:
            deactivate()
+
def get_language():
    """Return the currently active language code."""
    return _trans.get_language()

def get_language_bidi():
    """Return True if the active language uses a right-to-left layout."""
    return _trans.get_language_bidi()

def check_for_language(lang_code):
    """Return True if a translation catalog exists for ``lang_code``."""
    return _trans.check_for_language(lang_code)

def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    return _trans.to_locale(language)

def get_language_from_request(request, check_path=False):
    """Derive the language to use from ``request``."""
    return _trans.get_language_from_request(request, check_path)

def get_language_from_path(path, supported=None):
    """Extract a language code from a URL path prefix, if present."""
    return _trans.get_language_from_path(path, supported=supported)

def templatize(src, origin=None):
    """Delegate template preprocessing to the active translation backend."""
    return _trans.templatize(src, origin)

def deactivate_all():
    """Make the active translation a no-op (NullTranslations)."""
    return _trans.deactivate_all()
+
def _string_concat(*strings):
    """
    Lazy variant of string concatenation, needed for translations that are
    constructed from multiple parts.
    """
    return ''.join(force_text(piece) for piece in strings)
string_concat = lazy(_string_concat, six.text_type)
+
def get_language_info(lang_code):
    """
    Return the LANG_INFO entry for ``lang_code``, falling back to the generic
    language (e.g. 'fr' for 'fr-ca') when the exact code is unknown.

    Raises KeyError when neither lookup succeeds.
    """
    from django.conf.locale import LANG_INFO
    if lang_code in LANG_INFO:
        return LANG_INFO[lang_code]
    if '-' not in lang_code:
        raise KeyError("Unknown language code %s." % lang_code)
    generic_lang_code = lang_code.split('-')[0]
    try:
        return LANG_INFO[generic_lang_code]
    except KeyError:
        raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
diff --git a/lib/python2.7/site-packages/django/utils/translation/trans_null.py b/lib/python2.7/site-packages/django/utils/translation/trans_null.py
new file mode 100644
index 0000000..7ef0779
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/translation/trans_null.py
@@ -0,0 +1,63 @@
+# These are versions of the functions in django.utils.translation.trans_real
+# that don't actually do anything. This is purely for performance, so that
+# settings.USE_I18N = False can use this module rather than trans_real.py.
+
+from django.conf import settings
+from django.utils.encoding import force_text
+from django.utils.safestring import mark_safe, SafeData
+
def ngettext(singular, plural, number):
    """Return ``singular`` when ``number`` equals 1, else ``plural``."""
    return singular if number == 1 else plural
ngettext_lazy = ngettext
+
def ungettext(singular, plural, number):
    """Unicode variant of ngettext()."""
    return force_text(ngettext(singular, plural, number))

def pgettext(context, message):
    # Context is irrelevant when I18N is disabled.
    return ugettext(message)

def npgettext(context, singular, plural, number):
    # Context is irrelevant when I18N is disabled.
    return ungettext(singular, plural, number)
+
# No-op implementations: without I18N there is nothing to (de)activate and
# the configured LANGUAGE_CODE is always the active language.
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
+
# date formats shouldn't be used using gettext anymore. This
# is kept for backward compatibility
TECHNICAL_ID_MAP = {
    "DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
    "DATE_FORMAT": settings.DATE_FORMAT,
    "DATETIME_FORMAT": settings.DATETIME_FORMAT,
    "TIME_FORMAT": settings.TIME_FORMAT,
    "YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
    "MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}

def gettext(message):
    """Map legacy technical format ids to settings; pass others through."""
    result = TECHNICAL_ID_MAP.get(message, message)
    if isinstance(message, SafeData):
        # Preserve the safe-string marker of the input.
        return mark_safe(result)
    return result
+
def ugettext(message):
    """Unicode variant of gettext()."""
    return force_text(gettext(message))

# All remaining aliases resolve to the pass-through gettext().
gettext_noop = gettext_lazy = _ = gettext
+
def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    head, sep, tail = language.partition('-')
    if not sep:
        return language.lower()
    return head.lower() + '_' + tail.upper()
+
def get_language_from_request(request, check_path=False):
    # Without I18N, every request uses the configured language.
    return settings.LANGUAGE_CODE

def get_language_from_path(request, supported=None):
    # No language URL prefixes exist when I18N is disabled.
    return None
+
diff --git a/lib/python2.7/site-packages/django/utils/translation/trans_real.py b/lib/python2.7/site-packages/django/utils/translation/trans_real.py
new file mode 100644
index 0000000..195badf
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/translation/trans_real.py
@@ -0,0 +1,676 @@
+"""Translation helper functions."""
+from __future__ import unicode_literals
+
+import locale
+import os
+import re
+import sys
+import gettext as gettext_module
+from threading import local
+import warnings
+
+from django.utils.importlib import import_module
+from django.utils.datastructures import SortedDict
+from django.utils.encoding import force_str, force_text
+from django.utils.functional import memoize
+from django.utils._os import upath
+from django.utils.safestring import mark_safe, SafeData
+from django.utils import six
+from django.utils.six import StringIO
+from django.utils.translation import TranslatorCommentWarning
+
+
+# Translations are cached in a dictionary for every language+app tuple.
+# The active translations are stored by threadid to make them thread local.
+_translations = {}
+_active = local()
+
+# The default translation is based on the settings file.
+_default = None
+
+# This is a cache for normalized accept-header languages to prevent multiple
+# file lookups when checking the same locale on repeated requests.
+_accepted = {}
+_checked_languages = {}
+
+# magic gettext number to separate context from message
+CONTEXT_SEPARATOR = "\x04"
+
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
    ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*)      # "en", "en-au", "x-y-z", "es-419", "*"
    (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))?  # Optional "q=1.00", "q=0.8"
    (?:\s*,\s*|$)                                 # Multiple accepts per header.
    ''', re.VERBOSE)
+
+language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
+
+
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
    True, the last component is lower-cased (en_us).
    """
    lang, sep, country = language.partition('-')
    if not sep:
        return language.lower()
    if to_lower:
        return lang.lower() + '_' + country.lower()
    if len(country) > 2:
        # Title-case long subtags so e.g. sr-latn becomes sr_Latn.
        return lang.lower() + '_' + country[0].upper() + country[1:].lower()
    return lang.lower() + '_' + country.upper()
+
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if not sep:
        return locale.lower()
    return head.lower() + '-' + tail.lower()
+
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset.
    """
    def __init__(self, *args, **kw):
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # NOTE(review): set_output_charset() was removed from the stdlib in
        # Python 3.10; this vendored copy targets Python 2.7.
        self.set_output_charset('utf-8')
        self.__language = '??'

    def merge(self, other):
        # Fold another catalog's messages into this one (other wins on clash).
        self._catalog.update(other._catalog)

    def set_language(self, language):
        self.__language = language
        self.__to_language = to_language(language)

    def language(self):
        return self.__language

    def to_language(self):
        return self.__to_language

    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
+
def translation(language):
    """
    Returns a translation object.

    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations

    t = _translations.get(language, None)
    if t is not None:
        return t

    from django.conf import settings

    # Locale directory shipped next to the settings module's package.
    globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')

    def _fetch(lang, fallback=None):
        # Build (and cache in _translations) the merged catalog for `lang`.

        global _translations

        res = _translations.get(lang, None)
        if res is not None:
            return res

        loc = to_locale(lang)

        def _translation(path):
            # Load the 'django' domain catalog from `path`, or None if absent.
            try:
                t = gettext_module.translation('django', path, [loc], DjangoTranslation)
                t.set_language(lang)
                return t
            except IOError:
                return None

        res = _translation(globalpath)

        # We want to ensure that, for example, "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        # NOTE(review): if the global catalog is missing, res is None here and
        # the copies below would raise AttributeError — confirm callers can't
        # hit that path.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()

        def _merge(path):
            # Merge the catalog at `path` into res (or adopt it if res is None).
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res

        # Later entries in INSTALLED_APPS / LOCALE_PATHS take priority, so
        # iterate in reverse and let each merge overwrite earlier messages.
        for appname in reversed(settings.INSTALLED_APPS):
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')

            if os.path.isdir(apppath):
                res = _merge(apppath)

        for localepath in reversed(settings.LOCALE_PATHS):
            if os.path.isdir(localepath):
                res = _merge(localepath)

        if res is None:
            if fallback is not None:
                res = fallback
            else:
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res

    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)

    return current_translation
+
def activate(language):
    """
    Fetches the translation object for the given language and installs it as
    the current translation object for the current thread.
    """
    _active.value = translation(language)

def deactivate():
    """
    Uninstalls the currently active translation object so that further _ calls
    will resolve against the default translation object, again.
    """
    if hasattr(_active, "value"):
        del _active.value

def deactivate_all():
    """
    Makes the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    _active.value = gettext_module.NullTranslations()
+
def get_language():
    """Returns the currently selected language."""
    t = getattr(_active, "value", None)
    if t is not None:
        try:
            return t.to_language()
        except AttributeError:
            # e.g. a NullTranslations instance has no to_language().
            pass
    # If we don't have a real translation object, assume it's the default language.
    from django.conf import settings
    return settings.LANGUAGE_CODE
+
def get_language_bidi():
    """
    Returns selected language's BiDi layout.

    * False = left-to-right layout
    * True = right-to-left layout
    """
    from django.conf import settings
    root = get_language().split('-')[0]
    return root in settings.LANGUAGES_BIDI
+
def catalog():
    """
    Returns the current active catalog for further processing.
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default

    active = getattr(_active, "value", None)
    if active is not None:
        return active
    # No active translation: lazily build and cache the default one.
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default
+
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    # str() is allowing a bytestring message to remain bytestring on Python 2
    eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    t = getattr(_active, "value", None)
    if t is not None:
        result = getattr(t, translation_function)(eol_message)
    else:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        result = getattr(_default, translation_function)(eol_message)
    # Preserve the safe-string marker of the input.
    if isinstance(message, SafeData):
        return mark_safe(result)
    return result
+
def gettext(message):
    """
    Returns a string of the translation of the message.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_translate(message, 'gettext')

if six.PY3:
    # Python 3 has no separate unicode type, so ugettext is just gettext.
    ugettext = gettext
else:
    def ugettext(message):
        """Returns the translation of the message as a unicode string."""
        return do_translate(message, 'ugettext')
+
def pgettext(context, message):
    """Translate ``message``, disambiguated by msgctxt-style ``context``."""
    contextual = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    translated = ugettext(contextual)
    # A result still containing the separator means no translation was found.
    return message if CONTEXT_SEPARATOR in translated else translated
+
def gettext_noop(message):
    """
    Marks strings for translation but doesn't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    # Intentionally a pass-through; only the marking matters to xgettext.
    return message
+
def do_ntranslate(singular, plural, number, translation_function):
    """
    Dispatch a plural translation to the active (or lazily-built default)
    catalog using the named 'translation_function' (ngettext or ungettext).
    """
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)
+
def ngettext(singular, plural, number):
    """
    Returns a string of the translation of either the singular or plural,
    based on the number.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')

if six.PY3:
    # Python 3 has no separate unicode type, so ungettext is just ngettext.
    ungettext = ngettext
else:
    def ungettext(singular, plural, number):
        """
        Returns a unicode strings of the translation of either the singular or
        plural, based on the number.
        """
        return do_ntranslate(singular, plural, number, 'ungettext')
+
def npgettext(context, singular, plural, number):
    """Plural version of pgettext(), disambiguated by ``context``."""
    result = ungettext("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                       "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                       number)
    if CONTEXT_SEPARATOR in result:
        # Translation not found; retry without the context prefix.
        result = ungettext(singular, plural, number)
    return result
+
def all_locale_paths():
    """
    Returns a list of paths to user-provided languages files.
    """
    from django.conf import settings
    settings_dir = os.path.dirname(upath(sys.modules[settings.__module__].__file__))
    # Django's global locale dir first, then the configured extra paths.
    return [os.path.join(settings_dir, 'locale')] + list(settings.LOCALE_PATHS)
+
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies
    or session and during format localization.
    """
    for path in all_locale_paths():
        if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
            return True
    return False
# Cache results per lang_code in _checked_languages (keyed on 1 argument).
check_for_language = memoize(check_for_language, _checked_languages, 1)
+
def get_supported_language_variant(lang_code, supported=None, strict=False):
    """
    Returns the language-code that's listed in supported languages, possibly
    selecting a more generic variant. Raises LookupError if nothing found.

    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked is not found.
    """
    if supported is None:
        from django.conf import settings
        supported = SortedDict(settings.LANGUAGES)
    if lang_code:
        # if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
        generic_lang_code = lang_code.split('-')[0]
        variants = (lang_code, lang_code.lower(), generic_lang_code,
                    generic_lang_code.lower())
        for code in variants:
            # A variant counts only if it is configured AND has a catalog.
            if code in supported and check_for_language(code):
                return code
        if not strict:
            # if fr-fr is not supported, try fr-ca.
            for supported_code in supported:
                if supported_code.startswith((generic_lang_code + '-',
                                              generic_lang_code.lower() + '-')):
                    return supported_code
    raise LookupError(lang_code)
+
+def get_language_from_path(path, supported=None, strict=False):
+ """
+ Returns the language-code if there is a valid language-code
+ found in the `path`.
+
+ If `strict` is False (the default), the function will look for an alternative
+ country-specific variant when the currently checked one is not found.
+ """
+ if supported is None:
+ from django.conf import settings
+ supported = SortedDict(settings.LANGUAGES)
+ regex_match = language_code_prefix_re.match(path)
+ if not regex_match:
+ return None
+ lang_code = regex_match.group(1)
+ try:
+ return get_supported_language_variant(lang_code, supported, strict=strict)
+ except LookupError:
+ return None
+
+def get_language_from_request(request, check_path=False):
+ """
+ Analyzes the request to find what language the user wants the system to
+ show. Only languages listed in settings.LANGUAGES are taken into account.
+ If the user requests a sublanguage where we have a main language, we send
+ out the main language.
+
+ If check_path is True, the URL path prefix will be checked for a language
+ code, otherwise this is skipped for backwards compatibility.
+ """
+ global _accepted
+ from django.conf import settings
+ supported = SortedDict(settings.LANGUAGES)
+
+ if check_path:
+ lang_code = get_language_from_path(request.path_info, supported)
+ if lang_code is not None:
+ return lang_code
+
+ if hasattr(request, 'session'):
+ lang_code = request.session.get('django_language', None)
+ if lang_code in supported and lang_code is not None and check_for_language(lang_code):
+ return lang_code
+
+ lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
+
+ try:
+ return get_supported_language_variant(lang_code, supported)
+ except LookupError:
+ pass
+
+ accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
+ for accept_lang, unused in parse_accept_lang_header(accept):
+ if accept_lang == '*':
+ break
+
+ # 'normalized' is the root name of the locale in POSIX format (which is
+ # the format used for the directories holding the MO files).
+ normalized = locale.locale_alias.get(to_locale(accept_lang, True))
+ if not normalized:
+ continue
+ # Remove the default encoding from locale_alias.
+ normalized = normalized.split('.')[0]
+
+ if normalized in _accepted:
+ # We've seen this locale before and have an MO file for it, so no
+ # need to check again.
+ return _accepted[normalized]
+
+ try:
+ accept_lang = get_supported_language_variant(accept_lang, supported)
+ except LookupError:
+ continue
+ else:
+ _accepted[normalized] = accept_lang
+ return accept_lang
+
+ try:
+ return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
+ except LookupError:
+ return settings.LANGUAGE_CODE
+
+dot_re = re.compile(r'\S')
+def blankout(src, char):
+ """
+ Changes every non-whitespace character to the given char.
+ Used in the templatize function.
+ """
+ return dot_re.sub(char, src)
+
+context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
+inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
+block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
+endblock_re = re.compile(r"""^\s*endblocktrans$""")
+plural_re = re.compile(r"""^\s*plural$""")
+constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
+one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
+
+
+def templatize(src, origin=None):
+ """
+ Turns a Django template into something that is understood by xgettext. It
+ does so by translating the Django translation tags into standard gettext
+ function invocations.
+ """
+ from django.conf import settings
+ from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
+ TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
+ src = force_text(src, settings.FILE_CHARSET)
+ out = StringIO()
+ message_context = None
+ intrans = False
+ inplural = False
+ singular = []
+ plural = []
+ incomment = False
+ comment = []
+ lineno_comment_map = {}
+ comment_lineno_cache = None
+
+ for t in Lexer(src, origin).tokenize():
+ if incomment:
+ if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
+ content = ''.join(comment)
+ translators_comment_start = None
+ for lineno, line in enumerate(content.splitlines(True)):
+ if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
+ translators_comment_start = lineno
+ for lineno, line in enumerate(content.splitlines(True)):
+ if translators_comment_start is not None and lineno >= translators_comment_start:
+ out.write(' # %s' % line)
+ else:
+ out.write(' #\n')
+ incomment = False
+ comment = []
+ else:
+ comment.append(t.contents)
+ elif intrans:
+ if t.token_type == TOKEN_BLOCK:
+ endbmatch = endblock_re.match(t.contents)
+ pluralmatch = plural_re.match(t.contents)
+ if endbmatch:
+ if inplural:
+ if message_context:
+ out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
+ else:
+ out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
+ for part in singular:
+ out.write(blankout(part, 'S'))
+ for part in plural:
+ out.write(blankout(part, 'P'))
+ else:
+ if message_context:
+ out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
+ else:
+ out.write(' gettext(%r) ' % ''.join(singular))
+ for part in singular:
+ out.write(blankout(part, 'S'))
+ message_context = None
+ intrans = False
+ inplural = False
+ singular = []
+ plural = []
+ elif pluralmatch:
+ inplural = True
+ else:
+ filemsg = ''
+ if origin:
+ filemsg = 'file %s, ' % origin
+ raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
+ elif t.token_type == TOKEN_VAR:
+ if inplural:
+ plural.append('%%(%s)s' % t.contents)
+ else:
+ singular.append('%%(%s)s' % t.contents)
+ elif t.token_type == TOKEN_TEXT:
+ contents = one_percent_re.sub('%%', t.contents)
+ if inplural:
+ plural.append(contents)
+ else:
+ singular.append(contents)
+
+ else:
+ # Handle comment tokens (`{# ... #}`) plus other constructs on
+ # the same line:
+ if comment_lineno_cache is not None:
+ cur_lineno = t.lineno + t.contents.count('\n')
+ if comment_lineno_cache == cur_lineno:
+ if t.token_type != TOKEN_COMMENT:
+ for c in lineno_comment_map[comment_lineno_cache]:
+ filemsg = ''
+ if origin:
+ filemsg = 'file %s, ' % origin
+ warn_msg = ("The translator-targeted comment '%s' "
+ "(%sline %d) was ignored, because it wasn't the last item "
+ "on the line.") % (c, filemsg, comment_lineno_cache)
+ warnings.warn(warn_msg, TranslatorCommentWarning)
+ lineno_comment_map[comment_lineno_cache] = []
+ else:
+ out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
+ comment_lineno_cache = None
+
+ if t.token_type == TOKEN_BLOCK:
+ imatch = inline_re.match(t.contents)
+ bmatch = block_re.match(t.contents)
+ cmatches = constant_re.findall(t.contents)
+ if imatch:
+ g = imatch.group(1)
+ if g[0] == '"':
+ g = g.strip('"')
+ elif g[0] == "'":
+ g = g.strip("'")
+ g = one_percent_re.sub('%%', g)
+ if imatch.group(2):
+ # A context is provided
+ context_match = context_re.match(imatch.group(2))
+ message_context = context_match.group(1)
+ if message_context[0] == '"':
+ message_context = message_context.strip('"')
+ elif message_context[0] == "'":
+ message_context = message_context.strip("'")
+ out.write(' pgettext(%r, %r) ' % (message_context, g))
+ message_context = None
+ else:
+ out.write(' gettext(%r) ' % g)
+ elif bmatch:
+ for fmatch in constant_re.findall(t.contents):
+ out.write(' _(%s) ' % fmatch)
+ if bmatch.group(1):
+ # A context is provided
+ context_match = context_re.match(bmatch.group(1))
+ message_context = context_match.group(1)
+ if message_context[0] == '"':
+ message_context = message_context.strip('"')
+ elif message_context[0] == "'":
+ message_context = message_context.strip("'")
+ intrans = True
+ inplural = False
+ singular = []
+ plural = []
+ elif cmatches:
+ for cmatch in cmatches:
+ out.write(' _(%s) ' % cmatch)
+ elif t.contents == 'comment':
+ incomment = True
+ else:
+ out.write(blankout(t.contents, 'B'))
+ elif t.token_type == TOKEN_VAR:
+ parts = t.contents.split('|')
+ cmatch = constant_re.match(parts[0])
+ if cmatch:
+ out.write(' _(%s) ' % cmatch.group(1))
+ for p in parts[1:]:
+ if p.find(':_(') >= 0:
+ out.write(' %s ' % p.split(':',1)[1])
+ else:
+ out.write(blankout(p, 'F'))
+ elif t.token_type == TOKEN_COMMENT:
+ if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
+ lineno_comment_map.setdefault(t.lineno,
+ []).append(t.contents)
+ comment_lineno_cache = t.lineno
+ else:
+ out.write(blankout(t.contents, 'X'))
+ return force_str(out.getvalue())
+
+def parse_accept_lang_header(lang_string):
+ """
+ Parses the lang_string, which is the body of an HTTP Accept-Language
+ header, and returns a list of (lang, q-value), ordered by 'q' values.
+
+ Any format errors in lang_string results in an empty list being returned.
+ """
+ result = []
+ pieces = accept_language_re.split(lang_string)
+ if pieces[-1]:
+ return []
+ for i in range(0, len(pieces) - 1, 3):
+ first, lang, priority = pieces[i : i + 3]
+ if first:
+ return []
+ if priority:
+ priority = float(priority)
+ if not priority: # if priority is 0.0 at this point make it 1.0
+ priority = 1.0
+ result.append((lang, priority))
+ result.sort(key=lambda k: k[1], reverse=True)
+ return result
diff --git a/lib/python2.7/site-packages/django/utils/tree.py b/lib/python2.7/site-packages/django/utils/tree.py
new file mode 100644
index 0000000..3f93738
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/tree.py
@@ -0,0 +1,136 @@
+"""
+A class for storing a tree graph. Primarily used for filter constructs in the
+ORM.
+"""
+
+import copy
+
+class Node(object):
+ """
+ A single internal node in the tree graph. A Node should be viewed as a
+ connection (the root) with the children being either leaf nodes or other
+ Node instances.
+ """
+ # Standard connector type. Clients usually won't use this at all and
+ # subclasses will usually override the value.
+ default = 'DEFAULT'
+
+ def __init__(self, children=None, connector=None, negated=False):
+ """
+ Constructs a new Node. If no connector is given, the default will be
+ used.
+ """
+ self.children = children[:] if children else []
+ self.connector = connector or self.default
+ self.negated = negated
+
+ # We need this because of django.db.models.query_utils.Q. Q's __init__() is
+ # problematic, but it is a natural Node subclass in all other respects.
+ def _new_instance(cls, children=None, connector=None, negated=False):
+ """
+ This is called to create a new instance of this class when we need new
+ Nodes (or subclasses) in the internal code in this class. Normally, it
+ just shadows __init__(). However, subclasses with an __init__ signature
+ that is not an extension of Node.__init__ might need to implement this
+ method to allow a Node to create a new instance of them (if they have
+ any extra setting up to do).
+ """
+ obj = Node(children, connector, negated)
+ obj.__class__ = cls
+ return obj
+ _new_instance = classmethod(_new_instance)
+
+ def __str__(self):
+ if self.negated:
+ return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
+ in self.children]))
+ return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
+ self.children]))
+
+ def __deepcopy__(self, memodict):
+ """
+ Utility method used by copy.deepcopy().
+ """
+ obj = Node(connector=self.connector, negated=self.negated)
+ obj.__class__ = self.__class__
+ obj.children = copy.deepcopy(self.children, memodict)
+ return obj
+
+ def __len__(self):
+ """
+ The size of a node is the number of children it has.
+ """
+ return len(self.children)
+
+ def __bool__(self):
+ """
+ For truth value testing.
+ """
+ return bool(self.children)
+
+ def __nonzero__(self): # Python 2 compatibility
+ return type(self).__bool__(self)
+
+ def __contains__(self, other):
+ """
+ Returns True if 'other' is a direct child of this instance.
+ """
+ return other in self.children
+
+ def _prepare_data(self, data):
+ """
+ A subclass hook for doing subclass specific transformations of the
+ given data on combine() or add().
+ """
+ return data
+
+ def add(self, data, conn_type, squash=True):
+ """
+ Combines this tree and the data represented by data using the
+ connector conn_type. The combine is done by squashing the node other
+ away if possible.
+
+ This tree (self) will never be pushed to a child node of the
+ combined tree, nor will the connector or negated properties change.
+
+ The function returns a node which can be used in place of data
+ regardless if the node other got squashed or not.
+
+ If `squash` is False the data is prepared and added as a child to
+ this tree without further logic.
+ """
+ if data in self.children:
+ return data
+ data = self._prepare_data(data)
+ if not squash:
+ self.children.append(data)
+ return data
+ if self.connector == conn_type:
+ # We can reuse self.children to append or squash the node other.
+ if (isinstance(data, Node) and not data.negated
+ and (data.connector == conn_type or len(data) == 1)):
+ # We can squash the other node's children directly into this
+ # node. We are just doing (AB)(CD) == (ABCD) here, with the
+ # addition that if the length of the other node is 1 the
+ # connector doesn't matter. However, for the len(self) == 1
+ # case we don't want to do the squashing, as it would alter
+ # self.connector.
+ self.children.extend(data.children)
+ return self
+ else:
+ # We could use perhaps additional logic here to see if some
+ # children could be used for pushdown here.
+ self.children.append(data)
+ return data
+ else:
+ obj = self._new_instance(self.children, self.connector,
+ self.negated)
+ self.connector = conn_type
+ self.children = [obj, data]
+ return data
+
+ def negate(self):
+ """
+ Negate the sense of the root connector.
+ """
+ self.negated = not self.negated
diff --git a/lib/python2.7/site-packages/django/utils/tzinfo.py b/lib/python2.7/site-packages/django/utils/tzinfo.py
new file mode 100644
index 0000000..fd221ea
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/tzinfo.py
@@ -0,0 +1,100 @@
+"Implementation of tzinfo classes for use with datetime.datetime."
+
+from __future__ import unicode_literals
+
+import time
+from datetime import timedelta, tzinfo
+
+from django.utils.encoding import force_str, force_text, DEFAULT_LOCALE_ENCODING
+
+# Python's docs say: "A tzinfo subclass must have an __init__() method that can
+# be called with no arguments". FixedOffset and LocalTimezone don't honor this
+# requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as
+# well as pickling/unpickling.
+
+class FixedOffset(tzinfo):
+ "Fixed offset in minutes east from UTC."
+ def __init__(self, offset):
+ if isinstance(offset, timedelta):
+ self.__offset = offset
+ offset = self.__offset.seconds // 60
+ else:
+ self.__offset = timedelta(minutes=offset)
+
+ sign = '-' if offset < 0 else '+'
+ self.__name = "%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60)
+
+ def __repr__(self):
+ return self.__name
+
+ def __getinitargs__(self):
+ return self.__offset,
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return timedelta(0)
+
+# This implementation is used for display purposes. It uses an approximation
+# for DST computations on dates >= 2038.
+
+# A similar implementation exists in django.utils.timezone. It's used for
+# timezone support (when USE_TZ = True) and focuses on correctness.
+
+class LocalTimezone(tzinfo):
+ "Proxy timezone information from time module."
+ def __init__(self, dt):
+ tzinfo.__init__(self)
+ self.__dt = dt
+ self._tzname = self.tzname(dt)
+
+ def __repr__(self):
+ return force_str(self._tzname)
+
+ def __getinitargs__(self):
+ return self.__dt,
+
+ def utcoffset(self, dt):
+ if self._isdst(dt):
+ return timedelta(seconds=-time.altzone)
+ else:
+ return timedelta(seconds=-time.timezone)
+
+ def dst(self, dt):
+ if self._isdst(dt):
+ return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
+ else:
+ return timedelta(0)
+
+ def tzname(self, dt):
+ is_dst = False if dt is None else self._isdst(dt)
+ try:
+ return force_text(time.tzname[is_dst], DEFAULT_LOCALE_ENCODING)
+ except UnicodeDecodeError:
+ return None
+
+ def _isdst(self, dt):
+ tt = (dt.year, dt.month, dt.day,
+ dt.hour, dt.minute, dt.second,
+ dt.weekday(), 0, 0)
+ try:
+ stamp = time.mktime(tt)
+ except (OverflowError, ValueError):
+ # 32 bit systems can't handle dates after Jan 2038, and certain
+ # systems can't handle dates before ~1901-12-01:
+ #
+ # >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
+ # OverflowError: mktime argument out of range
+ # >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
+ # ValueError: year out of range
+ #
+ # In this case, we fake the date, because we only care about the
+ # DST flag.
+ tt = (2037,) + tt[1:]
+ stamp = time.mktime(tt)
+ tt = time.localtime(stamp)
+ return tt.tm_isdst > 0
diff --git a/lib/python2.7/site-packages/django/utils/unittest/__init__.py b/lib/python2.7/site-packages/django/utils/unittest/__init__.py
new file mode 100644
index 0000000..ac852a3
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/__init__.py
@@ -0,0 +1,80 @@
+"""
+unittest2
+
+unittest2 is a backport of the new features added to the unittest testing
+framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
+
+To use unittest2 instead of unittest simply replace ``import unittest`` with
+``import unittest2``.
+
+
+Copyright (c) 1999-2003 Steve Purcell
+Copyright (c) 2003-2010 Python Software Foundation
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+"""
+
+import sys
+
+# Django hackery to load the appropriate version of unittest
+
+try:
+ # check the system path first
+ from unittest2 import *
+except ImportError:
+ if sys.version_info >= (2,7):
+ # unittest2 features are native in Python 2.7
+ from unittest import *
+ else:
+ # otherwise use our bundled version
+ __all__ = ['TestResult', 'TestCase', 'TestSuite',
+ 'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
+ 'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
+ 'expectedFailure', 'TextTestResult', '__version__', 'collector']
+
+ __version__ = '0.5.1'
+
+ # Expose obsolete functions for backwards compatibility
+ __all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
+
+
+ from django.utils.unittest.collector import collector
+ from django.utils.unittest.result import TestResult
+ from django.utils.unittest.case import \
+ TestCase, FunctionTestCase, SkipTest, skip, skipIf,\
+ skipUnless, expectedFailure
+
+ from django.utils.unittest.suite import BaseTestSuite, TestSuite
+ from django.utils.unittest.loader import \
+ TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,\
+ findTestCases
+
+ from django.utils.unittest.main import TestProgram, main, main_
+ from django.utils.unittest.runner import TextTestRunner, TextTestResult
+
+ try:
+ from django.utils.unittest.signals import\
+ installHandler, registerResult, removeResult, removeHandler
+ except ImportError:
+ # Compatibility with platforms that don't have the signal module
+ pass
+ else:
+ __all__.extend(['installHandler', 'registerResult', 'removeResult',
+ 'removeHandler'])
+
+ # deprecated
+ _TextTestResult = TextTestResult
+
+ __unittest = True
diff --git a/lib/python2.7/site-packages/django/utils/unittest/__main__.py b/lib/python2.7/site-packages/django/utils/unittest/__main__.py
new file mode 100644
index 0000000..68b893d
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/__main__.py
@@ -0,0 +1,10 @@
+"""Main entry point"""
+
+import sys
+if sys.argv[0].endswith("__main__.py"):
+ sys.argv[0] = "unittest2"
+
+__unittest = True
+
+from django.utils.unittest.main import main_
+main_()
diff --git a/lib/python2.7/site-packages/django/utils/unittest/case.py b/lib/python2.7/site-packages/django/utils/unittest/case.py
new file mode 100644
index 0000000..fffd3c2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/case.py
@@ -0,0 +1,1076 @@
+"""Test case implementation"""
+
+import sys
+import difflib
+import pprint
+import re
+import unittest
+import warnings
+
+from django.utils.unittest import result
+from django.utils.unittest.util import\
+ safe_repr, safe_str, strclass,\
+ unorderable_list_difference
+
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+
+DIFF_OMITTED = ('\nDiff is %s characters long. '
+ 'Set self.maxDiff to None to see it.')
+
+class SkipTest(Exception):
+ """
+ Raise this exception in a test to skip it.
+
+ Usually you can use TestResult.skip() or one of the skipping decorators
+ instead of raising this directly.
+ """
+
+class _ExpectedFailure(Exception):
+ """
+ Raise this when a test is expected to fail.
+
+ This is an implementation detail.
+ """
+
+ def __init__(self, exc_info):
+ # can't use super because Python 2.4 exceptions are old style
+ Exception.__init__(self)
+ self.exc_info = exc_info
+
+class _UnexpectedSuccess(Exception):
+ """
+ The test was supposed to fail, but it didn't!
+ """
+
+def _id(obj):
+ return obj
+
+def skip(reason):
+ """
+ Unconditionally skip a test.
+ """
+ def decorator(test_item):
+ if not (isinstance(test_item, type) and issubclass(test_item, TestCase)):
+ @wraps(test_item)
+ def skip_wrapper(*args, **kwargs):
+ raise SkipTest(reason)
+ test_item = skip_wrapper
+
+ test_item.__unittest_skip__ = True
+ test_item.__unittest_skip_why__ = reason
+ return test_item
+ return decorator
+
+def skipIf(condition, reason):
+ """
+ Skip a test if the condition is true.
+ """
+ if condition:
+ return skip(reason)
+ return _id
+
+def skipUnless(condition, reason):
+ """
+ Skip a test unless the condition is true.
+ """
+ if not condition:
+ return skip(reason)
+ return _id
+
+
+def expectedFailure(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ raise _ExpectedFailure(sys.exc_info())
+ raise _UnexpectedSuccess
+ return wrapper
+
+
+class _AssertRaisesContext(object):
+ """A context manager used to implement TestCase.assertRaises* methods."""
+
+ def __init__(self, expected, test_case, expected_regexp=None):
+ self.expected = expected
+ self.failureException = test_case.failureException
+ self.expected_regexp = expected_regexp
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ if exc_type is None:
+ try:
+ exc_name = self.expected.__name__
+ except AttributeError:
+ exc_name = str(self.expected)
+ raise self.failureException(
+ "%s not raised" % (exc_name,))
+ if not issubclass(exc_type, self.expected):
+ # let unexpected exceptions pass through
+ return False
+ self.exception = exc_value # store for later retrieval
+ if self.expected_regexp is None:
+ return True
+
+ expected_regexp = self.expected_regexp
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(str(exc_value)):
+ raise self.failureException('"%s" does not match "%s"' %
+ (expected_regexp.pattern, str(exc_value)))
+ return True
+
+
+class _TypeEqualityDict(object):
+
+ def __init__(self, testcase):
+ self.testcase = testcase
+ self._store = {}
+
+ def __setitem__(self, key, value):
+ self._store[key] = value
+
+ def __getitem__(self, key):
+ value = self._store[key]
+ if isinstance(value, basestring):
+ return getattr(self.testcase, value)
+ return value
+
+ def get(self, key, default=None):
+ if key in self._store:
+ return self[key]
+ return default
+
+
+class TestCase(unittest.TestCase):
+ """A class whose instances are single test cases.
+
+ By default, the test code itself should be placed in a method named
+ 'runTest'.
+
+ If the fixture may be used for many test cases, create as
+ many test methods as are needed. When instantiating such a TestCase
+ subclass, specify in the constructor arguments the name of the test method
+ that the instance is to execute.
+
+ Test authors should subclass TestCase for their own tests. Construction
+ and deconstruction of the test's environment ('fixture') can be
+ implemented by overriding the 'setUp' and 'tearDown' methods respectively.
+
+ If it is necessary to override the __init__ method, the base class
+ __init__ method must always be called. It is important that subclasses
+ should not change the signature of their __init__ method, since instances
+ of the classes are instantiated automatically by parts of the framework
+ in order to be run.
+ """
+
+ # This attribute determines which exception will be raised when
+ # the instance's assertion methods fail; test methods raising this
+ # exception will be deemed to have 'failed' rather than 'errored'
+
+ failureException = AssertionError
+
+ # This attribute sets the maximum length of a diff in failure messages
+ # by assert methods using difflib. It is looked up as an instance attribute
+ # so can be configured by individual tests if required.
+
+ maxDiff = 80*8
+
+ # This attribute determines whether long messages (including repr of
+ # objects used in assert methods) will be printed on failure in *addition*
+ # to any explicit message passed.
+
+ longMessage = True
+
+ # Attribute used by TestSuite for classSetUp
+
+ _classSetupFailed = False
+
+ def __init__(self, methodName='runTest'):
+ """Create an instance of the class that will use the named test
+ method when executed. Raises a ValueError if the instance does
+ not have a method with the specified name.
+ """
+ self._testMethodName = methodName
+ self._resultForDoCleanups = None
+ try:
+ testMethod = getattr(self, methodName)
+ except AttributeError:
+ raise ValueError("no such test method in %s: %s" % \
+ (self.__class__, methodName))
+ self._testMethodDoc = testMethod.__doc__
+ self._cleanups = []
+
+ # Map types to custom assertEqual functions that will compare
+ # instances of said type in more detail to generate a more useful
+ # error message.
+ self._type_equality_funcs = _TypeEqualityDict(self)
+ self.addTypeEqualityFunc(dict, 'assertDictEqual')
+ self.addTypeEqualityFunc(list, 'assertListEqual')
+ self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
+ self.addTypeEqualityFunc(set, 'assertSetEqual')
+ self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
+ self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')
+
+ def addTypeEqualityFunc(self, typeobj, function):
+ """Add a type specific assertEqual style function to compare a type.
+
+ This method is for use by TestCase subclasses that need to register
+ their own type equality functions to provide nicer error messages.
+
+ Args:
+ typeobj: The data type to call this function on when both values
+ are of the same type in assertEqual().
+ function: The callable taking two arguments and an optional
+ msg= argument that raises self.failureException with a
+ useful error message when the two arguments are not equal.
+ """
+ self._type_equality_funcs[typeobj] = function
+
+ def addCleanup(self, function, *args, **kwargs):
+ """Add a function, with arguments, to be called when the test is
+ completed. Functions added are called on a LIFO basis and are
+ called after tearDown on test failure or success.
+
+ Cleanup items are called even if setUp fails (unlike tearDown)."""
+ self._cleanups.append((function, args, kwargs))
+
+ @classmethod
+ def setUpClass(cls):
+ "Hook method for setting up class fixture before running tests in the class."
+
+ @classmethod
+ def tearDownClass(cls):
+ "Hook method for deconstructing the class fixture after running all tests in the class."
+
+ def countTestCases(self):
+ return 1
+
+ def defaultTestResult(self):
+ return result.TestResult()
+
+ def shortDescription(self):
+ """Returns a one-line description of the test, or None if no
+ description has been provided.
+
+ The default implementation of this method returns the first line of
+ the specified test method's docstring.
+ """
+ doc = self._testMethodDoc
+ return doc and doc.split("\n")[0].strip() or None
+
+
+ def id(self):
+ return "%s.%s" % (strclass(self.__class__), self._testMethodName)
+
+ def __eq__(self, other):
+ if type(self) is not type(other):
+ return NotImplemented
+
+ return self._testMethodName == other._testMethodName
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((type(self), self._testMethodName))
+
+ def __str__(self):
+ return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
+
+ def __repr__(self):
+ return "<%s testMethod=%s>" % \
+ (strclass(self.__class__), self._testMethodName)
+
+ def _addSkip(self, result, reason):
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None:
+ addSkip(self, reason)
+ else:
+ warnings.warn("Use of a TestResult without an addSkip method is deprecated",
+ DeprecationWarning, 2)
+ result.addSuccess(self)
+
+ def run(self, result=None):
+ orig_result = result
+ if result is None:
+ result = self.defaultTestResult()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+
+ self._resultForDoCleanups = result
+ result.startTest(self)
+
+ testMethod = getattr(self, self._testMethodName)
+
+ if (getattr(self.__class__, "__unittest_skip__", False) or
+ getattr(testMethod, "__unittest_skip__", False)):
+ # If the class or method was skipped.
+ try:
+ skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
+ or getattr(testMethod, '__unittest_skip_why__', ''))
+ self._addSkip(result, skip_why)
+ finally:
+ result.stopTest(self)
+ return
+ try:
+ success = False
+ try:
+ self.setUp()
+ except SkipTest as e:
+ self._addSkip(result, str(e))
+ except Exception:
+ result.addError(self, sys.exc_info())
+ else:
+ try:
+ testMethod()
+ except self.failureException:
+ result.addFailure(self, sys.exc_info())
+ except _ExpectedFailure as e:
+ addExpectedFailure = getattr(result, 'addExpectedFailure', None)
+ if addExpectedFailure is not None:
+ addExpectedFailure(self, e.exc_info)
+ else:
+ warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
+ DeprecationWarning)
+ result.addSuccess(self)
+ except _UnexpectedSuccess:
+ addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
+ if addUnexpectedSuccess is not None:
+ addUnexpectedSuccess(self)
+ else:
+ warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
+ DeprecationWarning)
+ result.addFailure(self, sys.exc_info())
+ except SkipTest as e:
+ self._addSkip(result, str(e))
+ except Exception:
+ result.addError(self, sys.exc_info())
+ else:
+ success = True
+
+ try:
+ self.tearDown()
+ except Exception:
+ result.addError(self, sys.exc_info())
+ success = False
+
+ cleanUpSuccess = self.doCleanups()
+ success = success and cleanUpSuccess
+ if success:
+ result.addSuccess(self)
+ finally:
+ result.stopTest(self)
+ if orig_result is None:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+
+ def doCleanups(self):
+ """Execute all cleanup functions. Normally called for you after
+ tearDown."""
+ result = self._resultForDoCleanups
+ ok = True
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop(-1)
+ try:
+ function(*args, **kwargs)
+ except Exception:
+ ok = False
+ result.addError(self, sys.exc_info())
+ return ok
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the test without collecting errors in a TestResult"""
+ self.setUp()
+ getattr(self, self._testMethodName)()
+ self.tearDown()
+ while self._cleanups:
+ function, args, kwargs = self._cleanups.pop(-1)
+ function(*args, **kwargs)
+
+ def skipTest(self, reason):
+ """Skip this test."""
+ raise SkipTest(reason)
+
+ def fail(self, msg=None):
+ """Fail immediately, with the given message."""
+ raise self.failureException(msg)
+
+ def assertFalse(self, expr, msg=None):
+ "Fail the test if the expression is true."
+ if expr:
+ msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def assertTrue(self, expr, msg=None):
+ """Fail the test unless the expression is true."""
+ if not expr:
+ msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
+ raise self.failureException(msg)
+
+ def _formatMessage(self, msg, standardMsg):
+ """Honour the longMessage attribute when generating failure messages.
+ If longMessage is False this means:
+ * Use only an explicit message if it is provided
+ * Otherwise use the standard message for the assert
+
+ If longMessage is True:
+ * Use the standard message
+ * If an explicit message is provided, plus ' : ' and the explicit message
+ """
+ if not self.longMessage:
+ return msg or standardMsg
+ if msg is None:
+ return standardMsg
+ try:
+ return '%s : %s' % (standardMsg, msg)
+ except UnicodeDecodeError:
+ return '%s : %s' % (safe_str(standardMsg), safe_str(msg))
+
+
+ def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+
+ If called with callableObj omitted or None, will return a
+ context object used like this::
+
+ with self.assertRaises(SomeException):
+ do_something()
+
+ The context manager keeps a reference to the exception as
+ the 'exception' attribute. This allows you to inspect the
+ exception after the assertion::
+
+ with self.assertRaises(SomeException) as cm:
+ do_something()
+ the_exception = cm.exception
+ self.assertEqual(the_exception.error_code, 3)
+ """
+ if callableObj is None:
+ return _AssertRaisesContext(excClass, self)
+ try:
+ callableObj(*args, **kwargs)
+ except excClass:
+ return
+
+ if hasattr(excClass,'__name__'):
+ excName = excClass.__name__
+ else:
+ excName = str(excClass)
+ raise self.failureException("%s not raised" % excName)
+
+ def _getAssertEqualityFunc(self, first, second):
+ """Get a detailed comparison function for the types of the two args.
+
+ Returns: A callable accepting (first, second, msg=None) that will
+ raise a failure exception if first != second with a useful human
+ readable error message for those types.
+ """
+ #
+ # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
+ # and vice versa. I opted for the conservative approach in case
+ # subclasses are not intended to be compared in detail to their super
+ # class instances using a type equality func. This means testing
+ # subtypes won't automagically use the detailed comparison. Callers
+ # should use their type specific assertSpamEqual method to compare
+ # subclasses if the detailed comparison is desired and appropriate.
+ # See the discussion in http://bugs.python.org/issue2578.
+ #
+ if type(first) is type(second):
+ asserter = self._type_equality_funcs.get(type(first))
+ if asserter is not None:
+ return asserter
+
+ return self._baseAssertEqual
+
+ def _baseAssertEqual(self, first, second, msg=None):
+ """The default assertEqual implementation, not type specific."""
+ if not first == second:
+ standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertEqual(self, first, second, msg=None):
+ """Fail if the two objects are unequal as determined by the '=='
+ operator.
+ """
+ assertion_func = self._getAssertEqualityFunc(first, second)
+ assertion_func(first, second, msg=msg)
+
+ def assertNotEqual(self, first, second, msg=None):
+ """Fail if the two objects are equal as determined by the '=='
+ operator.
+ """
+ if not first != second:
+ msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
+ safe_repr(second)))
+ raise self.failureException(msg)
+
+ def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
+ """Fail if the two objects are unequal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+ between the two objects is more than the given delta.
+
+ Note that decimal places (from zero) are usually not the same
+ as significant digits (measured from the most signficant digit).
+
+ If the two objects compare equal then they will automatically
+ compare almost equal.
+ """
+ if first == second:
+ # shortcut
+ return
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+
+ if delta is not None:
+ if abs(first - second) <= delta:
+ return
+
+ standardMsg = '%s != %s within %s delta' % (safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta))
+ else:
+ if places is None:
+ places = 7
+
+ if round(abs(second-first), places) == 0:
+ return
+
+ standardMsg = '%s != %s within %r places' % (safe_repr(first),
+ safe_repr(second),
+ places)
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
+ """Fail if the two objects are equal as determined by their
+ difference rounded to the given number of decimal places
+ (default 7) and comparing to zero, or by comparing that the
+ between the two objects is less than the given delta.
+
+ Note that decimal places (from zero) are usually not the same
+ as significant digits (measured from the most signficant digit).
+
+ Objects that are equal automatically fail.
+ """
+ if delta is not None and places is not None:
+ raise TypeError("specify delta or places not both")
+ if delta is not None:
+ if not (first == second) and abs(first - second) > delta:
+ return
+ standardMsg = '%s == %s within %s delta' % (safe_repr(first),
+ safe_repr(second),
+ safe_repr(delta))
+ else:
+ if places is None:
+ places = 7
+ if not (first == second) and round(abs(second-first), places) != 0:
+ return
+ standardMsg = '%s == %s within %r places' % (safe_repr(first),
+ safe_repr(second),
+ places)
+
+ msg = self._formatMessage(msg, standardMsg)
+ raise self.failureException(msg)
+
+ # Synonyms for assertion methods
+
+ # The plurals are undocumented. Keep them that way to discourage use.
+ # Do not add more. Do not remove.
+ # Going through a deprecation cycle on these would annoy many people.
+ assertEquals = assertEqual
+ assertNotEquals = assertNotEqual
+ assertAlmostEquals = assertAlmostEqual
+ assertNotAlmostEquals = assertNotAlmostEqual
+ assert_ = assertTrue
+
+ # These fail* assertion method names are pending deprecation and will
+ # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
+ def _deprecate(original_func):
+ def deprecated_func(*args, **kwargs):
+ warnings.warn(
+ ('Please use %s instead.' % original_func.__name__),
+ PendingDeprecationWarning, 2)
+ return original_func(*args, **kwargs)
+ return deprecated_func
+
+ failUnlessEqual = _deprecate(assertEqual)
+ failIfEqual = _deprecate(assertNotEqual)
+ failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
+ failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
+ failUnless = _deprecate(assertTrue)
+ failUnlessRaises = _deprecate(assertRaises)
+ failIf = _deprecate(assertFalse)
+
+ def assertSequenceEqual(self, seq1, seq2,
+ msg=None, seq_type=None, max_diff=80*8):
+ """An equality assertion for ordered sequences (like lists and tuples).
+
+ For the purposes of this function, a valid ordered sequence type is one
+ which can be indexed, has a length, and has an equality operator.
+
+ Args:
+ seq1: The first sequence to compare.
+ seq2: The second sequence to compare.
+ seq_type: The expected datatype of the sequences, or None if no
+ datatype should be enforced.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+ max_diff: Maximum size off the diff, larger diffs are not shown
+ """
+ if seq_type is not None:
+ seq_type_name = seq_type.__name__
+ if not isinstance(seq1, seq_type):
+ raise self.failureException('First sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq1)))
+ if not isinstance(seq2, seq_type):
+ raise self.failureException('Second sequence is not a %s: %s'
+ % (seq_type_name, safe_repr(seq2)))
+ else:
+ seq_type_name = "sequence"
+
+ differing = None
+ try:
+ len1 = len(seq1)
+ except (TypeError, NotImplementedError):
+ differing = 'First %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ try:
+ len2 = len(seq2)
+ except (TypeError, NotImplementedError):
+ differing = 'Second %s has no length. Non-sequence?' % (
+ seq_type_name)
+
+ if differing is None:
+ if seq1 == seq2:
+ return
+
+ seq1_repr = repr(seq1)
+ seq2_repr = repr(seq2)
+ if len(seq1_repr) > 30:
+ seq1_repr = seq1_repr[:30] + '...'
+ if len(seq2_repr) > 30:
+ seq2_repr = seq2_repr[:30] + '...'
+ elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
+ differing = '%ss differ: %s != %s\n' % elements
+
+ for i in xrange(min(len1, len2)):
+ try:
+ item1 = seq1[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of first %s\n' %
+ (i, seq_type_name))
+ break
+
+ try:
+ item2 = seq2[i]
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('\nUnable to index element %d of second %s\n' %
+ (i, seq_type_name))
+ break
+
+ if item1 != item2:
+ differing += ('\nFirst differing element %d:\n%s\n%s\n' %
+ (i, item1, item2))
+ break
+ else:
+ if (len1 == len2 and seq_type is None and
+ type(seq1) != type(seq2)):
+ # The sequences are the same, but have differing types.
+ return
+
+ if len1 > len2:
+ differing += ('\nFirst %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len1 - len2))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len2, seq1[len2]))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of first %s\n' % (len2, seq_type_name))
+ elif len1 < len2:
+ differing += ('\nSecond %s contains %d additional '
+ 'elements.\n' % (seq_type_name, len2 - len1))
+ try:
+ differing += ('First extra element %d:\n%s\n' %
+ (len1, seq2[len1]))
+ except (TypeError, IndexError, NotImplementedError):
+ differing += ('Unable to index element %d '
+ 'of second %s\n' % (len1, seq_type_name))
+ standardMsg = differing
+ diffMsg = '\n' + '\n'.join(
+ difflib.ndiff(pprint.pformat(seq1).splitlines(),
+ pprint.pformat(seq2).splitlines()))
+
+ standardMsg = self._truncateMessage(standardMsg, diffMsg)
+ msg = self._formatMessage(msg, standardMsg)
+ self.fail(msg)
+
+ def _truncateMessage(self, message, diff):
+ max_diff = self.maxDiff
+ if max_diff is None or len(diff) <= max_diff:
+ return message + diff
+ return message + (DIFF_OMITTED % len(diff))
+
+ def assertListEqual(self, list1, list2, msg=None):
+ """A list-specific equality assertion.
+
+ Args:
+ list1: The first list to compare.
+ list2: The second list to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ """
+ self.assertSequenceEqual(list1, list2, msg, seq_type=list)
+
+ def assertTupleEqual(self, tuple1, tuple2, msg=None):
+ """A tuple-specific equality assertion.
+
+ Args:
+ tuple1: The first tuple to compare.
+ tuple2: The second tuple to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+ """
+ self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
+
+ def assertSetEqual(self, set1, set2, msg=None):
+ """A set-specific equality assertion.
+
+ Args:
+ set1: The first set to compare.
+ set2: The second set to compare.
+ msg: Optional message to use on failure instead of a list of
+ differences.
+
+ assertSetEqual uses ducktyping to support
+ different types of sets, and is optimized for sets specifically
+ (parameters must support a difference method).
+ """
+ try:
+ difference1 = set1.difference(set2)
+ except TypeError as e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError as e:
+ self.fail('first argument does not support set difference: %s' % e)
+
+ try:
+ difference2 = set2.difference(set1)
+ except TypeError as e:
+ self.fail('invalid type when attempting set difference: %s' % e)
+ except AttributeError as e:
+ self.fail('second argument does not support set difference: %s' % e)
+
+ if not (difference1 or difference2):
+ return
+
+ lines = []
+ if difference1:
+ lines.append('Items in the first set but not the second:')
+ for item in difference1:
+ lines.append(repr(item))
+ if difference2:
+ lines.append('Items in the second set but not the first:')
+ for item in difference2:
+ lines.append(repr(item))
+
+ standardMsg = '\n'.join(lines)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a in b), but with a nicer default message."""
+ if member not in container:
+ standardMsg = '%s not found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIn(self, member, container, msg=None):
+ """Just like self.assertTrue(a not in b), but with a nicer default message."""
+ if member in container:
+ standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
+ safe_repr(container))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIs(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is b), but with a nicer default message."""
+ if expr1 is not expr2:
+ standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNot(self, expr1, expr2, msg=None):
+ """Just like self.assertTrue(a is not b), but with a nicer default message."""
+ if expr1 is expr2:
+ standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictEqual(self, d1, d2, msg=None):
+ self.assertTrue(isinstance(d1, dict), 'First argument is not a dictionary')
+ self.assertTrue(isinstance(d2, dict), 'Second argument is not a dictionary')
+
+ if d1 != d2:
+ standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
+ diff = ('\n' + '\n'.join(difflib.ndiff(
+ pprint.pformat(d1).splitlines(),
+ pprint.pformat(d2).splitlines())))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertDictContainsSubset(self, expected, actual, msg=None):
+ """Checks whether actual is a superset of expected."""
+ missing = []
+ mismatched = []
+ for key, value in expected.iteritems():
+ if key not in actual:
+ missing.append(key)
+ elif value != actual[key]:
+ mismatched.append('%s, expected: %s, actual: %s' %
+ (safe_repr(key), safe_repr(value),
+ safe_repr(actual[key])))
+
+ if not (missing or mismatched):
+ return
+
+ standardMsg = ''
+ if missing:
+ standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
+ missing)
+ if mismatched:
+ if standardMsg:
+ standardMsg += '; '
+ standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
+
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
+ """An unordered sequence specific comparison. It asserts that
+ expected_seq and actual_seq contain the same elements. It is
+ the equivalent of::
+
+ self.assertEqual(sorted(expected_seq), sorted(actual_seq))
+
+ Raises with an error message listing which elements of expected_seq
+ are missing from actual_seq and vice versa if any.
+
+ Asserts that each element has the same count in both sequences.
+ Example:
+ - [0, 1, 1] and [1, 0, 1] compare equal.
+ - [0, 0, 1] and [0, 1] compare unequal.
+ """
+ try:
+ expected = sorted(expected_seq)
+ actual = sorted(actual_seq)
+ except TypeError:
+ # Unsortable items (example: set(), complex(), ...)
+ expected = list(expected_seq)
+ actual = list(actual_seq)
+ missing, unexpected = unorderable_list_difference(
+ expected, actual, ignore_duplicate=False
+ )
+ else:
+ return self.assertSequenceEqual(expected, actual, msg=msg)
+
+ errors = []
+ if missing:
+ errors.append('Expected, but missing:\n %s' %
+ safe_repr(missing))
+ if unexpected:
+ errors.append('Unexpected, but present:\n %s' %
+ safe_repr(unexpected))
+ if errors:
+ standardMsg = '\n'.join(errors)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertMultiLineEqual(self, first, second, msg=None):
+ """Assert that two multi-line strings are equal."""
+ self.assertTrue(isinstance(first, basestring), (
+ 'First argument is not a string'))
+ self.assertTrue(isinstance(second, basestring), (
+ 'Second argument is not a string'))
+
+ if first != second:
+ standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
+ diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
+ second.splitlines(True)))
+ standardMsg = self._truncateMessage(standardMsg, diff)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLess(self, a, b, msg=None):
+ """Just like self.assertTrue(a < b), but with a nicer default message."""
+ if not a < b:
+ standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertLessEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a <= b), but with a nicer default message."""
+ if not a <= b:
+ standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreater(self, a, b, msg=None):
+ """Just like self.assertTrue(a > b), but with a nicer default message."""
+ if not a > b:
+ standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertGreaterEqual(self, a, b, msg=None):
+ """Just like self.assertTrue(a >= b), but with a nicer default message."""
+ if not a >= b:
+ standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNone(self, obj, msg=None):
+ """Same as self.assertTrue(obj is None), with a nicer default message."""
+ if obj is not None:
+ standardMsg = '%s is not None' % (safe_repr(obj),)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsNotNone(self, obj, msg=None):
+ """Included for symmetry with assertIsNone."""
+ if obj is None:
+ standardMsg = 'unexpectedly None'
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertIsInstance(self, obj, cls, msg=None):
+ """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
+ default message."""
+ if not isinstance(obj, cls):
+ standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertNotIsInstance(self, obj, cls, msg=None):
+ """Included for symmetry with assertIsInstance."""
+ if isinstance(obj, cls):
+ standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
+ self.fail(self._formatMessage(msg, standardMsg))
+
+ def assertRaisesRegexp(self, expected_exception, expected_regexp,
+ callable_obj=None, *args, **kwargs):
+ """Asserts that the message in a raised exception matches a regexp.
+
+ Args:
+ expected_exception: Exception class expected to be raised.
+ expected_regexp: Regexp (re pattern object or string) expected
+ to be found in error message.
+ callable_obj: Function to be called.
+ args: Extra args.
+ kwargs: Extra kwargs.
+ """
+ if callable_obj is None:
+ return _AssertRaisesContext(expected_exception, self, expected_regexp)
+ try:
+ callable_obj(*args, **kwargs)
+ except expected_exception as exc_value:
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(str(exc_value)):
+ raise self.failureException('"%s" does not match "%s"' %
+ (expected_regexp.pattern, str(exc_value)))
+ else:
+ if hasattr(expected_exception, '__name__'):
+ excName = expected_exception.__name__
+ else:
+ excName = str(expected_exception)
+ raise self.failureException("%s not raised" % excName)
+
+ def assertRegexpMatches(self, text, expected_regexp, msg=None):
+ """Fail the test unless the text matches the regular expression."""
+ if isinstance(expected_regexp, basestring):
+ expected_regexp = re.compile(expected_regexp)
+ if not expected_regexp.search(text):
+ msg = msg or "Regexp didn't match"
+ msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
+ raise self.failureException(msg)
+
+ def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
+ """Fail the test if the text matches the regular expression."""
+ if isinstance(unexpected_regexp, basestring):
+ unexpected_regexp = re.compile(unexpected_regexp)
+ match = unexpected_regexp.search(text)
+ if match:
+ msg = msg or "Regexp matched"
+ msg = '%s: %r matches %r in %r' % (msg,
+ text[match.start():match.end()],
+ unexpected_regexp.pattern,
+ text)
+ raise self.failureException(msg)
+
+class FunctionTestCase(TestCase):
+ """A test case that wraps a test function.
+
+ This is useful for slipping pre-existing test functions into the
+ unittest framework. Optionally, set-up and tidy-up functions can be
+ supplied. As with TestCase, the tidy-up ('tearDown') function will
+ always be called if the set-up ('setUp') function ran successfully.
+ """
+
+ def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
+ super(FunctionTestCase, self).__init__()
+ self._setUpFunc = setUp
+ self._tearDownFunc = tearDown
+ self._testFunc = testFunc
+ self._description = description
+
+ def setUp(self):
+ if self._setUpFunc is not None:
+ self._setUpFunc()
+
+ def tearDown(self):
+ if self._tearDownFunc is not None:
+ self._tearDownFunc()
+
+ def runTest(self):
+ self._testFunc()
+
+ def id(self):
+ return self._testFunc.__name__
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._setUpFunc == other._setUpFunc and \
+ self._tearDownFunc == other._tearDownFunc and \
+ self._testFunc == other._testFunc and \
+ self._description == other._description
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash((type(self), self._setUpFunc, self._tearDownFunc,
+ self._testFunc, self._description))
+
+ def __str__(self):
+ return "%s (%s)" % (strclass(self.__class__),
+ self._testFunc.__name__)
+
+ def __repr__(self):
+ return "<%s testFunc=%s>" % (strclass(self.__class__),
+ self._testFunc)
+
+ def shortDescription(self):
+ if self._description is not None:
+ return self._description
+ doc = self._testFunc.__doc__
+ return doc and doc.split("\n")[0].strip() or None
diff --git a/lib/python2.7/site-packages/django/utils/unittest/collector.py b/lib/python2.7/site-packages/django/utils/unittest/collector.py
new file mode 100644
index 0000000..0f76fc3
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/collector.py
@@ -0,0 +1,9 @@
+import os
+import sys
+from django.utils.unittest.loader import defaultTestLoader
+
+def collector():
+ # import __main__ triggers code re-execution
+ __main__ = sys.modules['__main__']
+ setupDir = os.path.abspath(os.path.dirname(__main__.__file__))
+ return defaultTestLoader.discover(setupDir)
diff --git a/lib/python2.7/site-packages/django/utils/unittest/compatibility.py b/lib/python2.7/site-packages/django/utils/unittest/compatibility.py
new file mode 100644
index 0000000..a0dc499
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/compatibility.py
@@ -0,0 +1,64 @@
+import os
+import sys
+
+try:
+ from functools import wraps
+except ImportError:
+ # only needed for Python 2.4
+ def wraps(_):
+ def _wraps(func):
+ return func
+ return _wraps
+
+__unittest = True
+
+def _relpath_nt(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+ if start_list[0].lower() != path_list[0].lower():
+ unc_path, rest = os.path.splitunc(path)
+ unc_start, rest = os.path.splitunc(start)
+ if bool(unc_path) ^ bool(unc_start):
+ raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
+ % (path, start))
+ else:
+ raise ValueError("path is on drive %s, start on drive %s"
+ % (path_list[0], start_list[0]))
+ # Work out how much of the filepath is shared by start and path.
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i].lower() != path_list[i].lower():
+ break
+ else:
+ i += 1
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+# default to posixpath definition
+def _relpath_posix(path, start=os.path.curdir):
+ """Return a relative version of a path"""
+
+ if not path:
+ raise ValueError("no path specified")
+
+ start_list = os.path.abspath(start).split(os.path.sep)
+ path_list = os.path.abspath(path).split(os.path.sep)
+
+ # Work out how much of the filepath is shared by start and path.
+ i = len(os.path.commonprefix([start_list, path_list]))
+
+ rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return os.path.curdir
+ return os.path.join(*rel_list)
+
+if os.path is sys.modules.get('ntpath'):
+ relpath = _relpath_nt
+else:
+ relpath = _relpath_posix
diff --git a/lib/python2.7/site-packages/django/utils/unittest/loader.py b/lib/python2.7/site-packages/django/utils/unittest/loader.py
new file mode 100644
index 0000000..695bac4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/loader.py
@@ -0,0 +1,322 @@
+"""Loading unittests."""
+
+import os
+import re
+import sys
+import traceback
+import types
+import unittest
+
+from fnmatch import fnmatch
+
+from django.utils.unittest import case, suite
+
+try:
+ from os.path import relpath
+except ImportError:
+ from django.utils.unittest.compatibility import relpath
+
+__unittest = True
+
+
+def _CmpToKey(mycmp):
+ 'Convert a cmp= function into a key= function'
+ class K(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) == -1
+ return K
+
+
+# what about .pyc or .pyo (etc)
+# we would need to avoid loading the same tests multiple times
+# from '.py', '.pyc' *and* '.pyo'
+VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
+
+
+def _make_failed_import_test(name, suiteClass):
+ message = 'Failed to import test module: %s' % name
+ if hasattr(traceback, 'format_exc'):
+ # Python 2.3 compatibility
+ # format_exc returns two frames of discover.py as well
+ message += '\n%s' % traceback.format_exc()
+ return _make_failed_test('ModuleImportFailure', name, ImportError(message),
+ suiteClass)
+
+def _make_failed_load_tests(name, exception, suiteClass):
+ return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
+
+def _make_failed_test(classname, methodname, exception, suiteClass):
+ def testFailure(self):
+ raise exception
+ attrs = {methodname: testFailure}
+ TestClass = type(classname, (case.TestCase,), attrs)
+ return suiteClass((TestClass(methodname),))
+
+
+class TestLoader(unittest.TestLoader):
+ """
+ This class is responsible for loading tests according to various criteria
+ and returning them wrapped in a TestSuite
+ """
+ testMethodPrefix = 'test'
+ sortTestMethodsUsing = cmp
+ suiteClass = suite.TestSuite
+ _top_level_dir = None
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Return a suite of all tests cases contained in testCaseClass"""
+ if issubclass(testCaseClass, suite.TestSuite):
+ raise TypeError("Test cases should not be derived from TestSuite."
+ " Maybe you meant to derive from TestCase?")
+ testCaseNames = self.getTestCaseNames(testCaseClass)
+ if not testCaseNames and hasattr(testCaseClass, 'runTest'):
+ testCaseNames = ['runTest']
+ loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
+ return loaded_suite
+
+ def loadTestsFromModule(self, module, use_load_tests=True):
+ """Return a suite of all tests cases contained in the given module"""
+ tests = []
+ for name in dir(module):
+ obj = getattr(module, name)
+ if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ tests.append(self.loadTestsFromTestCase(obj))
+
+ load_tests = getattr(module, 'load_tests', None)
+ tests = self.suiteClass(tests)
+ if use_load_tests and load_tests is not None:
+ try:
+ return load_tests(self, tests, None)
+ except Exception as e:
+ return _make_failed_load_tests(module.__name__, e,
+ self.suiteClass)
+ return tests
+
+ def loadTestsFromName(self, name, module=None):
+ """Return a suite of all tests cases given a string specifier.
+
+ The name may resolve either to a module, a test case class, a
+ test method within a test case class, or a callable object which
+ returns a TestCase or TestSuite instance.
+
+ The method optionally resolves the names relative to a given module.
+ """
+ parts = name.split('.')
+ if module is None:
+ parts_copy = parts[:]
+ while parts_copy:
+ try:
+ module = __import__('.'.join(parts_copy))
+ break
+ except ImportError:
+ del parts_copy[-1]
+ if not parts_copy:
+ raise
+ parts = parts[1:]
+ obj = module
+ for part in parts:
+ parent, obj = obj, getattr(obj, part)
+
+ if isinstance(obj, types.ModuleType):
+ return self.loadTestsFromModule(obj)
+ elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ elif (isinstance(obj, types.UnboundMethodType) and
+ isinstance(parent, type) and
+ issubclass(parent, unittest.TestCase)):
+ return self.suiteClass([parent(obj.__name__)])
+ elif isinstance(obj, unittest.TestSuite):
+ return obj
+ elif hasattr(obj, '__call__'):
+ test = obj()
+ if isinstance(test, unittest.TestSuite):
+ return test
+ elif isinstance(test, unittest.TestCase):
+ return self.suiteClass([test])
+ else:
+ raise TypeError("calling %s returned %s, not a test" %
+ (obj, test))
+ else:
+ raise TypeError("don't know how to make test from: %s" % obj)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Return a suite of all tests cases found using the given sequence
+ of string specifiers. See 'loadTestsFromName()'.
+ """
+ suites = [self.loadTestsFromName(name, module) for name in names]
+ return self.suiteClass(suites)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Return a sorted sequence of method names found within testCaseClass
+ """
+ def isTestMethod(attrname, testCaseClass=testCaseClass,
+ prefix=self.testMethodPrefix):
+ return attrname.startswith(prefix) and \
+ hasattr(getattr(testCaseClass, attrname), '__call__')
+ testFnNames = filter(isTestMethod, dir(testCaseClass))
+ if self.sortTestMethodsUsing:
+ testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
+ return testFnNames
+
+ def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
+ """Find and return all test modules from the specified start
+ directory, recursing into subdirectories to find them. Only test files
+ that match the pattern will be loaded. (Using shell style pattern
+ matching.)
+
+ All test modules must be importable from the top level of the project.
+ If the start directory is not the top level directory then the top
+ level directory must be specified separately.
+
+ If a test package name (directory with '__init__.py') matches the
+ pattern then the package will be checked for a 'load_tests' function. If
+ this exists then it will be called with loader, tests, pattern.
+
+ If load_tests exists then discovery does *not* recurse into the package,
+ load_tests is responsible for loading all tests in the package.
+
+ The pattern is deliberately not stored as a loader attribute so that
+ packages can continue discovery themselves. top_level_dir is stored so
+ load_tests does not need to pass this argument in to loader.discover().
+ """
+ set_implicit_top = False
+ if top_level_dir is None and self._top_level_dir is not None:
+ # make top_level_dir optional if called from load_tests in a package
+ top_level_dir = self._top_level_dir
+ elif top_level_dir is None:
+ set_implicit_top = True
+ top_level_dir = start_dir
+
+ top_level_dir = os.path.abspath(top_level_dir)
+
+ if not top_level_dir in sys.path:
+ # all test modules must be importable from the top level directory
+ # should we *unconditionally* put the start directory in first
+ # in sys.path to minimise likelihood of conflicts between installed
+ # modules and development versions?
+ sys.path.insert(0, top_level_dir)
+ self._top_level_dir = top_level_dir
+
+ is_not_importable = False
+ if os.path.isdir(os.path.abspath(start_dir)):
+ start_dir = os.path.abspath(start_dir)
+ if start_dir != top_level_dir:
+ is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
+ else:
+ # support for discovery from dotted module names
+ try:
+ __import__(start_dir)
+ except ImportError:
+ is_not_importable = True
+ else:
+ the_module = sys.modules[start_dir]
+ top_part = start_dir.split('.')[0]
+ start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
+ if set_implicit_top:
+ self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
+ sys.path.remove(top_level_dir)
+
+ if is_not_importable:
+ raise ImportError('Start directory is not importable: %r' % start_dir)
+
+ tests = list(self._find_tests(start_dir, pattern))
+ return self.suiteClass(tests)
+
+ def _get_name_from_path(self, path):
+ path = os.path.splitext(os.path.normpath(path))[0]
+
+ _relpath = relpath(path, self._top_level_dir)
+ assert not os.path.isabs(_relpath), "Path must be within the project"
+ assert not _relpath.startswith('..'), "Path must be within the project"
+
+ name = _relpath.replace(os.path.sep, '.')
+ return name
+
+ def _get_module_from_name(self, name):
+ __import__(name)
+ return sys.modules[name]
+
+ def _match_path(self, path, full_path, pattern):
+ # override this method to use alternative matching strategy
+ return fnmatch(path, pattern)
+
+ def _find_tests(self, start_dir, pattern):
+ """Used by discovery. Yields test suites it loads."""
+ paths = os.listdir(start_dir)
+
+ for path in paths:
+ full_path = os.path.join(start_dir, path)
+ if os.path.isfile(full_path):
+ if not VALID_MODULE_NAME.match(path):
+ # valid Python identifiers only
+ continue
+ if not self._match_path(path, full_path, pattern):
+ continue
+ # if the test file matches, load it
+ name = self._get_name_from_path(full_path)
+ try:
+ module = self._get_module_from_name(name)
+ except:
+ yield _make_failed_import_test(name, self.suiteClass)
+ else:
+ mod_file = os.path.abspath(getattr(module, '__file__', full_path))
+ realpath = os.path.splitext(mod_file)[0]
+ fullpath_noext = os.path.splitext(full_path)[0]
+ if realpath.lower() != fullpath_noext.lower():
+ module_dir = os.path.dirname(realpath)
+ mod_name = os.path.splitext(os.path.basename(full_path))[0]
+ expected_dir = os.path.dirname(full_path)
+ msg = ("%r module incorrectly imported from %r. Expected %r. "
+ "Is this module globally installed?")
+ raise ImportError(msg % (mod_name, module_dir, expected_dir))
+ yield self.loadTestsFromModule(module)
+ elif os.path.isdir(full_path):
+ if not os.path.isfile(os.path.join(full_path, '__init__.py')):
+ continue
+
+ load_tests = None
+ tests = None
+ if fnmatch(path, pattern):
+ # only check load_tests if the package directory itself matches the filter
+ name = self._get_name_from_path(full_path)
+ package = self._get_module_from_name(name)
+ load_tests = getattr(package, 'load_tests', None)
+ tests = self.loadTestsFromModule(package, use_load_tests=False)
+
+ if load_tests is None:
+ if tests is not None:
+ # tests loaded from package file
+ yield tests
+ # recurse into the package
+ for test in self._find_tests(full_path, pattern):
+ yield test
+ else:
+ try:
+ yield load_tests(self, tests, pattern)
+ except Exception as e:
+ yield _make_failed_load_tests(package.__name__, e,
+ self.suiteClass)
+
+defaultTestLoader = TestLoader()
+
+
+def _makeLoader(prefix, sortUsing, suiteClass=None):
+ loader = TestLoader()
+ loader.sortTestMethodsUsing = sortUsing
+ loader.testMethodPrefix = prefix
+ if suiteClass:
+ loader.suiteClass = suiteClass
+ return loader
+
+def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
+ return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
+
+def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
+
+def findTestCases(module, prefix='test', sortUsing=cmp,
+ suiteClass=suite.TestSuite):
+ return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
diff --git a/lib/python2.7/site-packages/django/utils/unittest/main.py b/lib/python2.7/site-packages/django/utils/unittest/main.py
new file mode 100644
index 0000000..659310b
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/main.py
@@ -0,0 +1,241 @@
+"""Unittest main program"""
+
+import sys
+import os
+import types
+
+from django.utils.unittest import loader, runner
+try:
+ from django.utils.unittest.signals import installHandler
+except ImportError:
+ installHandler = None
+
+__unittest = True
+
+FAILFAST = " -f, --failfast Stop on first failure\n"
+CATCHBREAK = " -c, --catch Catch control-C and display results\n"
+BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
+
+USAGE_AS_MAIN = """\
+Usage: %(progName)s [options] [tests]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s test_module - run tests from test_module
+ %(progName)s test_module.TestClass - run tests from
+ test_module.TestClass
+ %(progName)s test_module.TestClass.test_method - run specified test method
+
+[tests] can be a list of any number of test modules, classes and test
+methods.
+
+Alternative Usage: %(progName)s discover [options]
+
+Options:
+ -v, --verbose Verbose output
+%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
+ -p pattern Pattern to match test files ('test*.py' default)
+ -t directory Top level directory of project (default to
+ start directory)
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+USAGE_FROM_MODULE = """\
+Usage: %(progName)s [options] [test] [...]
+
+Options:
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+ %(progName)s - run default set of tests
+ %(progName)s MyTestSuite - run suite 'MyTestSuite'
+ %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
+ %(progName)s MyTestCase - run all 'test*' test methods
+ in MyTestCase
+"""
+
+
+class TestProgram(object):
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ USAGE = USAGE_FROM_MODULE
+
+ # defaults for testing
+ failfast = catchbreak = buffer = progName = None
+
+ def __init__(self, module='__main__', defaultTest=None,
+ argv=None, testRunner=None,
+ testLoader=loader.defaultTestLoader, exit=True,
+ verbosity=1, failfast=None, catchbreak=None, buffer=None):
+ if isinstance(module, basestring):
+ self.module = __import__(module)
+ for part in module.split('.')[1:]:
+ self.module = getattr(self.module, part)
+ else:
+ self.module = module
+ if argv is None:
+ argv = sys.argv
+
+ self.exit = exit
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.catchbreak = catchbreak
+ self.buffer = buffer
+ self.defaultTest = defaultTest
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ self.progName = os.path.basename(argv[0])
+ self.parseArgs(argv)
+ self.runTests()
+
+ def usageExit(self, msg=None):
+ if msg:
+ print(msg)
+ usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+ 'buffer': ''}
+ if self.failfast != False:
+ usage['failfast'] = FAILFAST
+ if self.catchbreak != False and installHandler is not None:
+ usage['catchbreak'] = CATCHBREAK
+ if self.buffer != False:
+ usage['buffer'] = BUFFEROUTPUT
+ print(self.USAGE % usage)
+ sys.exit(2)
+
+ def parseArgs(self, argv):
+ if len(argv) > 1 and argv[1].lower() == 'discover':
+ self._do_discovery(argv[2:])
+ return
+
+ import getopt
+ long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
+ try:
+ options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if opt in ('-f','--failfast'):
+ if self.failfast is None:
+ self.failfast = True
+ # Should this raise an exception if -f is not valid?
+ if opt in ('-c','--catch'):
+ if self.catchbreak is None and installHandler is not None:
+ self.catchbreak = True
+ # Should this raise an exception if -c is not valid?
+ if opt in ('-b','--buffer'):
+ if self.buffer is None:
+ self.buffer = True
+ # Should this raise an exception if -b is not valid?
+ if len(args) == 0 and self.defaultTest is None:
+ # createTests will load tests from self.module
+ self.testNames = None
+ elif len(args) > 0:
+ self.testNames = args
+ if __name__ == '__main__':
+ # to support python -m unittest ...
+ self.module = None
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ except getopt.error as msg:
+ self.usageExit(msg)
+
+ def createTests(self):
+ if self.testNames is None:
+ self.test = self.testLoader.loadTestsFromModule(self.module)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames,
+ self.module)
+
+ def _do_discovery(self, argv, Loader=loader.TestLoader):
+ # handle command line args for test discovery
+ self.progName = '%s discover' % self.progName
+ import optparse
+ parser = optparse.OptionParser()
+ parser.prog = self.progName
+ parser.add_option('-v', '--verbose', dest='verbose', default=False,
+ help='Verbose output', action='store_true')
+ if self.failfast != False:
+ parser.add_option('-f', '--failfast', dest='failfast', default=False,
+ help='Stop on first fail or error',
+ action='store_true')
+ if self.catchbreak != False and installHandler is not None:
+ parser.add_option('-c', '--catch', dest='catchbreak', default=False,
+ help='Catch ctrl-C and display results so far',
+ action='store_true')
+ if self.buffer != False:
+ parser.add_option('-b', '--buffer', dest='buffer', default=False,
+ help='Buffer stdout and stderr during tests',
+ action='store_true')
+ parser.add_option('-s', '--start-directory', dest='start', default='.',
+ help="Directory to start discovery ('.' default)")
+ parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+ help="Pattern to match tests ('test*.py' default)")
+ parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+ help='Top level directory of project (defaults to start directory)')
+
+ options, args = parser.parse_args(argv)
+ if len(args) > 3:
+ self.usageExit()
+
+ for name, value in zip(('start', 'pattern', 'top'), args):
+ setattr(options, name, value)
+
+ # only set options from the parsing here
+ # if they weren't set explicitly in the constructor
+ if self.failfast is None:
+ self.failfast = options.failfast
+ if self.catchbreak is None and installHandler is not None:
+ self.catchbreak = options.catchbreak
+ if self.buffer is None:
+ self.buffer = options.buffer
+
+ if options.verbose:
+ self.verbosity = 2
+
+ start_dir = options.start
+ pattern = options.pattern
+ top_level_dir = options.top
+
+ loader = Loader()
+ self.test = loader.discover(start_dir, pattern, top_level_dir)
+
+ def runTests(self):
+ if self.catchbreak:
+ installHandler()
+ if self.testRunner is None:
+ self.testRunner = runner.TextTestRunner
+ if isinstance(self.testRunner, (type, types.ClassType)):
+ try:
+ testRunner = self.testRunner(verbosity=self.verbosity,
+ failfast=self.failfast,
+ buffer=self.buffer)
+ except TypeError:
+ # didn't accept the verbosity, buffer or failfast arguments
+ testRunner = self.testRunner()
+ else:
+ # it is assumed to be a TestRunner instance
+ testRunner = self.testRunner
+ self.result = testRunner.run(self.test)
+ if self.exit:
+ sys.exit(not self.result.wasSuccessful())
+
+main = TestProgram
+
+def main_():
+ TestProgram.USAGE = USAGE_AS_MAIN
+ main(module=None)
+
diff --git a/lib/python2.7/site-packages/django/utils/unittest/result.py b/lib/python2.7/site-packages/django/utils/unittest/result.py
new file mode 100644
index 0000000..2d2a1ad
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/result.py
@@ -0,0 +1,183 @@
+"""Test result object"""
+
+import sys
+import traceback
+import unittest
+
+from StringIO import StringIO
+
+from django.utils.unittest import util
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+def failfast(method):
+ @wraps(method)
+ def inner(self, *args, **kw):
+ if getattr(self, 'failfast', False):
+ self.stop()
+ return method(self, *args, **kw)
+ return inner
+
+
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
+
+class TestResult(unittest.TestResult):
+ """Holder for test result information.
+
+ Test results are automatically managed by the TestCase and TestSuite
+ classes, and do not need to be explicitly manipulated by writers of tests.
+
+ Each instance holds the total number of tests run, and collections of
+ failures and errors that occurred among those test runs. The collections
+ contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
+ formatted traceback of the error that occurred.
+ """
+ _previousTestClass = None
+ _moduleSetUpFailed = False
+
+ def __init__(self):
+ self.failfast = False
+ self.failures = []
+ self.errors = []
+ self.testsRun = 0
+ self.skipped = []
+ self.expectedFailures = []
+ self.unexpectedSuccesses = []
+ self.shouldStop = False
+ self.buffer = False
+ self._stdout_buffer = None
+ self._stderr_buffer = None
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+ self._mirrorOutput = False
+
+ def startTest(self, test):
+ "Called when the given test is about to be run"
+ self.testsRun += 1
+ self._mirrorOutput = False
+ if self.buffer:
+ if self._stderr_buffer is None:
+ self._stderr_buffer = StringIO()
+ self._stdout_buffer = StringIO()
+ sys.stdout = self._stdout_buffer
+ sys.stderr = self._stderr_buffer
+
+ def startTestRun(self):
+ """Called once before any tests are executed.
+
+ See startTest for a method called before each test.
+ """
+
+ def stopTest(self, test):
+ """Called when the given test has been run"""
+ if self.buffer:
+ if self._mirrorOutput:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ self._original_stdout.write(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ self._original_stderr.write(STDERR_LINE % error)
+
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
+ self._stdout_buffer.seek(0)
+ self._stdout_buffer.truncate()
+ self._stderr_buffer.seek(0)
+ self._stderr_buffer.truncate()
+ self._mirrorOutput = False
+
+
+ def stopTestRun(self):
+ """Called once after all tests are executed.
+
+ See stopTest for a method called after each test.
+ """
+
+ @failfast
+ def addError(self, test, err):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info().
+ """
+ self.errors.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ @failfast
+ def addFailure(self, test, err):
+ """Called when an error has occurred. 'err' is a tuple of values as
+ returned by sys.exc_info()."""
+ self.failures.append((test, self._exc_info_to_string(err, test)))
+ self._mirrorOutput = True
+
+ def addSuccess(self, test):
+ "Called when a test has completed successfully"
+ pass
+
+ def addSkip(self, test, reason):
+ """Called when a test is skipped."""
+ self.skipped.append((test, reason))
+
+ def addExpectedFailure(self, test, err):
+ """Called when an expected failure/error occurred."""
+ self.expectedFailures.append(
+ (test, self._exc_info_to_string(err, test)))
+
+ @failfast
+ def addUnexpectedSuccess(self, test):
+ """Called when a test was expected to fail, but succeeded."""
+ self.unexpectedSuccesses.append(test)
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return (len(self.failures) + len(self.errors) == 0)
+
+ def stop(self):
+ "Indicates that the tests should be aborted"
+ self.shouldStop = True
+
+ def _exc_info_to_string(self, err, test):
+ """Converts a sys.exc_info()-style tuple of values into a string."""
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ while tb and self._is_relevant_tb_level(tb):
+ tb = tb.tb_next
+ if exctype is test.failureException:
+ # Skip assert*() traceback levels
+ length = self._count_relevant_tb_levels(tb)
+ msgLines = traceback.format_exception(exctype, value, tb, length)
+ else:
+ msgLines = traceback.format_exception(exctype, value, tb)
+
+ if self.buffer:
+ output = sys.stdout.getvalue()
+ error = sys.stderr.getvalue()
+ if output:
+ if not output.endswith('\n'):
+ output += '\n'
+ msgLines.append(STDOUT_LINE % output)
+ if error:
+ if not error.endswith('\n'):
+ error += '\n'
+ msgLines.append(STDERR_LINE % error)
+ return ''.join(msgLines)
+
+ def _is_relevant_tb_level(self, tb):
+ return '__unittest' in tb.tb_frame.f_globals
+
+ def _count_relevant_tb_levels(self, tb):
+ length = 0
+ while tb and not self._is_relevant_tb_level(tb):
+ length += 1
+ tb = tb.tb_next
+ return length
+
+ def __repr__(self):
+ return "<%s run=%i errors=%i failures=%i>" % \
+ (util.strclass(self.__class__), self.testsRun, len(self.errors),
+ len(self.failures))
diff --git a/lib/python2.7/site-packages/django/utils/unittest/runner.py b/lib/python2.7/site-packages/django/utils/unittest/runner.py
new file mode 100644
index 0000000..242173e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/runner.py
@@ -0,0 +1,206 @@
+"""Running tests"""
+
+import sys
+import time
+import unittest
+
+from django.utils.unittest import result
+
+try:
+ from django.utils.unittest.signals import registerResult
+except ImportError:
+ def registerResult(_):
+ pass
+
+__unittest = True
+
+
+class _WritelnDecorator(object):
+ """Used to decorate file-like objects with a handy 'writeln' method"""
+ def __init__(self,stream):
+ self.stream = stream
+
+ def __getattr__(self, attr):
+ if attr in ('stream', '__getstate__'):
+ raise AttributeError(attr)
+ return getattr(self.stream,attr)
+
+ def writeln(self, arg=None):
+ if arg:
+ self.write(arg)
+ self.write('\n') # text-mode streams translate to \r\n if needed
+
+
+class TextTestResult(result.TestResult):
+ """A test result class that can print formatted text results to a stream.
+
+ Used by TextTestRunner.
+ """
+ separator1 = '=' * 70
+ separator2 = '-' * 70
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__()
+ self.stream = stream
+ self.showAll = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return '\n'.join((str(test), doc_first_line))
+ else:
+ return str(test)
+
+ def startTest(self, test):
+ super(TextTestResult, self).startTest(test)
+ if self.showAll:
+ self.stream.write(self.getDescription(test))
+ self.stream.write(" ... ")
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super(TextTestResult, self).addSuccess(test)
+ if self.showAll:
+ self.stream.writeln("ok")
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ def addError(self, test, err):
+ super(TextTestResult, self).addError(test, err)
+ if self.showAll:
+ self.stream.writeln("ERROR")
+ elif self.dots:
+ self.stream.write('E')
+ self.stream.flush()
+
+ def addFailure(self, test, err):
+ super(TextTestResult, self).addFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("FAIL")
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ if self.showAll:
+ self.stream.writeln("skipped %r" % (reason,))
+ elif self.dots:
+ self.stream.write("s")
+ self.stream.flush()
+
+ def addExpectedFailure(self, test, err):
+ super(TextTestResult, self).addExpectedFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("expected failure")
+ elif self.dots:
+ self.stream.write("x")
+ self.stream.flush()
+
+ def addUnexpectedSuccess(self, test):
+ super(TextTestResult, self).addUnexpectedSuccess(test)
+ if self.showAll:
+ self.stream.writeln("unexpected success")
+ elif self.dots:
+ self.stream.write("u")
+ self.stream.flush()
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavour, errors):
+ for test, err in errors:
+ self.stream.writeln(self.separator1)
+ self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln("%s" % err)
+
+ def stopTestRun(self):
+ super(TextTestResult, self).stopTestRun()
+ self.printErrors()
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """A test runner class that displays results in textual form.
+
+ It prints out the names of tests as they are run, errors as they
+ occur, and a summary of the results at the end of the test run.
+ """
+ resultclass = TextTestResult
+
+ def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
+ failfast=False, buffer=False, resultclass=None):
+ self.stream = _WritelnDecorator(stream)
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.buffer = buffer
+ if resultclass is not None:
+ self.resultclass = resultclass
+
+ def _makeResult(self):
+ return self.resultclass(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = self._makeResult()
+ result.failfast = self.failfast
+ result.buffer = self.buffer
+ registerResult(result)
+
+ startTime = time.time()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+ try:
+ test(result)
+ finally:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+ else:
+ result.printErrors()
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ if hasattr(result, 'separator2'):
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+
+ expectedFails = unexpectedSuccesses = skipped = 0
+ try:
+ results = map(len, (result.expectedFailures,
+ result.unexpectedSuccesses,
+ result.skipped))
+ expectedFails, unexpectedSuccesses, skipped = results
+ except AttributeError:
+ pass
+ infos = []
+ if not result.wasSuccessful():
+ self.stream.write("FAILED")
+ failed, errored = map(len, (result.failures, result.errors))
+ if failed:
+ infos.append("failures=%d" % failed)
+ if errored:
+ infos.append("errors=%d" % errored)
+ else:
+ self.stream.write("OK")
+ if skipped:
+ infos.append("skipped=%d" % skipped)
+ if expectedFails:
+ infos.append("expected failures=%d" % expectedFails)
+ if unexpectedSuccesses:
+ infos.append("unexpected successes=%d" % unexpectedSuccesses)
+ if infos:
+ self.stream.writeln(" (%s)" % (", ".join(infos),))
+ else:
+ self.stream.write("\n")
+ return result
diff --git a/lib/python2.7/site-packages/django/utils/unittest/signals.py b/lib/python2.7/site-packages/django/utils/unittest/signals.py
new file mode 100644
index 0000000..f1731ea
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/signals.py
@@ -0,0 +1,57 @@
+import signal
+import weakref
+
+from django.utils.unittest.compatibility import wraps
+
+__unittest = True
+
+
+class _InterruptHandler(object):
+ def __init__(self, default_handler):
+ self.called = False
+ self.default_handler = default_handler
+
+ def __call__(self, signum, frame):
+ installed_handler = signal.getsignal(signal.SIGINT)
+ if installed_handler is not self:
+ # if we aren't the installed handler, then delegate immediately
+ # to the default handler
+ self.default_handler(signum, frame)
+
+ if self.called:
+ self.default_handler(signum, frame)
+ self.called = True
+ for result in _results.keys():
+ result.stop()
+
+_results = weakref.WeakKeyDictionary()
+def registerResult(result):
+ _results[result] = 1
+
+def removeResult(result):
+ return bool(_results.pop(result, None))
+
+_interrupt_handler = None
+def installHandler():
+ global _interrupt_handler
+ if _interrupt_handler is None:
+ default_handler = signal.getsignal(signal.SIGINT)
+ _interrupt_handler = _InterruptHandler(default_handler)
+ signal.signal(signal.SIGINT, _interrupt_handler)
+
+
+def removeHandler(method=None):
+ if method is not None:
+ @wraps(method)
+ def inner(*args, **kwargs):
+ initial = signal.getsignal(signal.SIGINT)
+ removeHandler()
+ try:
+ return method(*args, **kwargs)
+ finally:
+ signal.signal(signal.SIGINT, initial)
+ return inner
+
+ global _interrupt_handler
+ if _interrupt_handler is not None:
+ signal.signal(signal.SIGINT, _interrupt_handler.default_handler)
diff --git a/lib/python2.7/site-packages/django/utils/unittest/suite.py b/lib/python2.7/site-packages/django/utils/unittest/suite.py
new file mode 100644
index 0000000..da8ac2e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/suite.py
@@ -0,0 +1,287 @@
+"""TestSuite"""
+
+import sys
+import unittest
+from django.utils.unittest import case, util
+
+__unittest = True
+
+
+class BaseTestSuite(unittest.TestSuite):
+ """A simple test suite that doesn't provide class or module shared fixtures.
+ """
+ def __init__(self, tests=()):
+ self._tests = []
+ self.addTests(tests)
+
+ def __repr__(self):
+ return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+ return list(self) == list(other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # Can't guarantee hash invariant, so flag as unhashable
+ __hash__ = None
+
+ def __iter__(self):
+ return iter(self._tests)
+
+ def countTestCases(self):
+ cases = 0
+ for test in self:
+ cases += test.countTestCases()
+ return cases
+
+ def addTest(self, test):
+ # sanity checks
+ if not hasattr(test, '__call__'):
+ raise TypeError("%r is not callable" % (repr(test),))
+ if isinstance(test, type) and issubclass(test,
+ (case.TestCase, TestSuite)):
+ raise TypeError("TestCases and TestSuites must be instantiated "
+ "before passing them to addTest()")
+ self._tests.append(test)
+
+ def addTests(self, tests):
+ if isinstance(tests, basestring):
+ raise TypeError("tests must be an iterable of tests, not a string")
+ for test in tests:
+ self.addTest(test)
+
+ def run(self, result):
+ for test in self:
+ if result.shouldStop:
+ break
+ test(result)
+ return result
+
+ def __call__(self, *args, **kwds):
+ return self.run(*args, **kwds)
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ for test in self:
+ test.debug()
+
+
+class TestSuite(BaseTestSuite):
+ """A test suite is a composite test consisting of a number of TestCases.
+
+ For use, create an instance of TestSuite, then add test case instances.
+ When all tests have been added, the suite can be passed to a test
+ runner, such as TextTestRunner. It will run the individual test cases
+ in the order in which they were added, aggregating the results. When
+ subclassing, do not forget to call the base class constructor.
+ """
+
+
+ def run(self, result):
+ self._wrapped_run(result)
+ self._tearDownPreviousClass(None, result)
+ self._handleModuleTearDown(result)
+ return result
+
+ def debug(self):
+ """Run the tests without collecting errors in a TestResult"""
+ debug = _DebugResult()
+ self._wrapped_run(debug, True)
+ self._tearDownPreviousClass(None, debug)
+ self._handleModuleTearDown(debug)
+
+ ################################
+ # private methods
+ def _wrapped_run(self, result, debug=False):
+ for test in self:
+ if result.shouldStop:
+ break
+
+ if _isnotsuite(test):
+ self._tearDownPreviousClass(test, result)
+ self._handleModuleFixture(test, result)
+ self._handleClassSetUp(test, result)
+ result._previousTestClass = test.__class__
+
+ if (getattr(test.__class__, '_classSetupFailed', False) or
+ getattr(result, '_moduleSetUpFailed', False)):
+ continue
+
+ if hasattr(test, '_wrapped_run'):
+ test._wrapped_run(result, debug)
+ elif not debug:
+ test(result)
+ else:
+ test.debug()
+
+ def _handleClassSetUp(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if result._moduleSetUpFailed:
+ return
+ if getattr(currentClass, "__unittest_skip__", False):
+ return
+
+ try:
+ currentClass._classSetupFailed = False
+ except TypeError:
+ # test may actually be a function
+ # so its class will be a builtin-type
+ pass
+
+ setUpClass = getattr(currentClass, 'setUpClass', None)
+ if setUpClass is not None:
+ try:
+ setUpClass()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ currentClass._classSetupFailed = True
+ className = util.strclass(currentClass)
+ errorName = 'setUpClass (%s)' % className
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _get_previous_module(self, result):
+ previousModule = None
+ previousClass = getattr(result, '_previousTestClass', None)
+ if previousClass is not None:
+ previousModule = previousClass.__module__
+ return previousModule
+
+
+ def _handleModuleFixture(self, test, result):
+ previousModule = self._get_previous_module(result)
+ currentModule = test.__class__.__module__
+ if currentModule == previousModule:
+ return
+
+ self._handleModuleTearDown(result)
+
+
+ result._moduleSetUpFailed = False
+ try:
+ module = sys.modules[currentModule]
+ except KeyError:
+ return
+ setUpModule = getattr(module, 'setUpModule', None)
+ if setUpModule is not None:
+ try:
+ setUpModule()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ result._moduleSetUpFailed = True
+ errorName = 'setUpModule (%s)' % currentModule
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _addClassOrModuleLevelException(self, result, exception, errorName):
+ error = _ErrorHolder(errorName)
+ addSkip = getattr(result, 'addSkip', None)
+ if addSkip is not None and isinstance(exception, case.SkipTest):
+ addSkip(error, str(exception))
+ else:
+ result.addError(error, sys.exc_info())
+
+ def _handleModuleTearDown(self, result):
+ previousModule = self._get_previous_module(result)
+ if previousModule is None:
+ return
+ if result._moduleSetUpFailed:
+ return
+
+ try:
+ module = sys.modules[previousModule]
+ except KeyError:
+ return
+
+ tearDownModule = getattr(module, 'tearDownModule', None)
+ if tearDownModule is not None:
+ try:
+ tearDownModule()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ errorName = 'tearDownModule (%s)' % previousModule
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+ def _tearDownPreviousClass(self, test, result):
+ previousClass = getattr(result, '_previousTestClass', None)
+ currentClass = test.__class__
+ if currentClass == previousClass:
+ return
+ if getattr(previousClass, '_classSetupFailed', False):
+ return
+ if getattr(result, '_moduleSetUpFailed', False):
+ return
+ if getattr(previousClass, "__unittest_skip__", False):
+ return
+
+ tearDownClass = getattr(previousClass, 'tearDownClass', None)
+ if tearDownClass is not None:
+ try:
+ tearDownClass()
+ except Exception as e:
+ if isinstance(result, _DebugResult):
+ raise
+ className = util.strclass(previousClass)
+ errorName = 'tearDownClass (%s)' % className
+ self._addClassOrModuleLevelException(result, e, errorName)
+
+
+class _ErrorHolder(object):
+ """
+ Placeholder for a TestCase inside a result. As far as a TestResult
+ is concerned, this looks exactly like a unit test. Used to insert
+ arbitrary errors into a test suite run.
+ """
+ # Inspired by the ErrorHolder from Twisted:
+ # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
+
+ # attribute used by TestResult._exc_info_to_string
+ failureException = None
+
+ def __init__(self, description):
+ self.description = description
+
+ def id(self):
+ return self.description
+
+ def shortDescription(self):
+ return None
+
+ def __repr__(self):
+ return "<ErrorHolder description=%r>" % (self.description,)
+
+ def __str__(self):
+ return self.id()
+
+ def run(self, result):
+ # could call result.addError(...) - but this test-like object
+ # shouldn't be run anyway
+ pass
+
+ def __call__(self, result):
+ return self.run(result)
+
+ def countTestCases(self):
+ return 0
+
+def _isnotsuite(test):
+ "A crude way to tell apart testcases and suites with duck-typing"
+ try:
+ iter(test)
+ except TypeError:
+ return True
+ return False
+
+
+class _DebugResult(object):
+ "Used by the TestSuite to hold previous class when running in debug."
+ _previousTestClass = None
+ _moduleSetUpFailed = False
+ shouldStop = False
diff --git a/lib/python2.7/site-packages/django/utils/unittest/util.py b/lib/python2.7/site-packages/django/utils/unittest/util.py
new file mode 100644
index 0000000..c45d008
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/unittest/util.py
@@ -0,0 +1,99 @@
+"""Various utility functions."""
+
+__unittest = True
+
+
# Longest repr returned untruncated by safe_repr(short=True).
_MAX_LENGTH = 80

def safe_repr(obj, short=False):
    """Return repr(obj) without ever raising.

    Falls back to object.__repr__ when obj's own __repr__ blows up; when
    *short* is true, results of _MAX_LENGTH or more characters are cut
    down and flagged as truncated.
    """
    try:
        text = repr(obj)
    except Exception:
        # A broken __repr__ must not break failure reporting.
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
+
def safe_str(obj):
    """Return str(obj), tolerating objects whose __str__ raises."""
    try:
        result = str(obj)
    except Exception:
        # Fall back to the default object rendering.
        result = object.__str__(obj)
    return result
+
def strclass(cls):
    """Return the fully qualified "module.ClassName" label for *cls*."""
    return '.'.join([cls.__module__, cls.__name__])
+
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    # Merge-style walk over the two sorted lists: i indexes `expected`,
    # j indexes `actual`.  Running off the end of either list raises
    # IndexError, which is used as the loop-exit signal below.
    i = j = 0
    missing = []
    unexpected = []
    while True:
        try:
            e = expected[i]
            a = actual[j]
            if e < a:
                # e has no counterpart in actual; record it once and skip
                # any adjacent duplicates of e in expected.
                missing.append(e)
                i += 1
                while expected[i] == e:
                    i += 1
            elif e > a:
                # a has no counterpart in expected; record it once and
                # skip any adjacent duplicates of a in actual.
                unexpected.append(a)
                j += 1
                while actual[j] == a:
                    j += 1
            else:
                # e == a: present in both; skip duplicates on both sides.
                # The finally block guarantees j advances past a even when
                # the expected[] duplicate scan raises IndexError first.
                i += 1
                try:
                    while expected[i] == e:
                        i += 1
                finally:
                    j += 1
                    while actual[j] == a:
                        j += 1
        except IndexError:
            # One list ran out; whatever remains of the other cannot be
            # matched.  Note the tails are copied verbatim, so duplicates
            # still sitting in a tail are not collapsed here.
            missing.extend(expected[i:])
            unexpected.extend(actual[j:])
            break
    return missing, unexpected
+
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Compute the two-way difference of two lists of unorderable items.

    Counterpart of sorted_list_difference for items (such as dicts) that
    cannot be sorted. Each item costs a linear search (list.remove), so
    the overall cost is O(n*n). Both input lists are consumed/mutated.
    """
    missing = []
    unexpected = []
    # Drain `expected`, cancelling each item against one occurrence in
    # `actual`; items with no counterpart are missing.
    while expected:
        item = expected.pop()
        try:
            actual.remove(item)
        except ValueError:
            missing.append(item)
        if ignore_duplicate:
            # Purge every remaining copy of this item from both lists.
            for seq in (expected, actual):
                while item in seq:
                    seq.remove(item)
    if ignore_duplicate:
        # Collapse what is left of `actual` down to unique items.
        while actual:
            item = actual.pop()
            unexpected.append(item)
            while item in actual:
                actual.remove(item)
        return missing, unexpected

    # anything left in actual is unexpected
    return missing, actual
diff --git a/lib/python2.7/site-packages/django/utils/version.py b/lib/python2.7/site-packages/django/utils/version.py
new file mode 100644
index 0000000..e0a8286
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/version.py
@@ -0,0 +1,51 @@
+from __future__ import unicode_literals
+
+import datetime
+import os
+import subprocess
+
def get_version(version=None):
    """Returns a PEP 386-compliant version number from VERSION.

    *version* is a 5-tuple (major, minor, micro, releaselevel, serial);
    when omitted, django.VERSION is used.
    """
    if version is None:
        from django import VERSION as version
    else:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')

    # main = X.Y[.Z] -- the micro component is dropped when it is 0.
    numeric = version[:2] if version[2] == 0 else version[:3]
    main = '.'.join(str(bit) for bit in numeric)

    # sub = .devN for pre-alpha git snapshots, {a|b|c}N for alpha/beta/rc
    # releases, and empty for final releases.
    if version[3] == 'alpha' and version[4] == 0:
        git_changeset = get_git_changeset()
        sub = '.dev%s' % git_changeset if git_changeset else ''
    elif version[3] != 'final':
        sub = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[version[3]] + str(version[4])
    else:
        sub = ''

    return str(main + sub)
+
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS
    format. This value isn't guaranteed to be unique, but collisions are
    very unlikely, so it's sufficient for generating development version
    numbers. Returns None when no timestamp can be obtained (e.g. not
    running from a git checkout).
    """
    # Query the repository that contains this package, not the caller's cwd.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            shell=True, cwd=repo_dir, universal_newlines=True)
    stdout, _ = proc.communicate()
    try:
        when = datetime.datetime.utcfromtimestamp(int(stdout))
    except ValueError:
        # git missing, empty output, or unparsable timestamp.
        return None
    return when.strftime('%Y%m%d%H%M%S')
diff --git a/lib/python2.7/site-packages/django/utils/xmlutils.py b/lib/python2.7/site-packages/django/utils/xmlutils.py
new file mode 100644
index 0000000..a1eb5fb
--- /dev/null
+++ b/lib/python2.7/site-packages/django/utils/xmlutils.py
@@ -0,0 +1,14 @@
+"""
+Utilities for XML generation/parsing.
+"""
+
+from xml.sax.saxutils import XMLGenerator
+
class SimplerXMLGenerator(XMLGenerator):
    """XMLGenerator subclass with a shortcut for writing leaf elements."""

    def addQuickElement(self, name, contents=None, attrs=None):
        """Add a single element with optional text contents and no children."""
        if attrs is None:
            attrs = {}
        self.startElement(name, attrs)
        if contents is not None:
            self.characters(contents)
        self.endElement(name)