author:     ttt  2017-05-13 00:29:47 +0530
committer:  ttt  2017-05-13 00:29:47 +0530
commit:     abf599be33b383a6a5baf9493093b2126a622ac8 (patch)
tree:       4c5ab6e0d935d5e65fabcf0258e4a00dd20a5afa /lib/python2.7/site-packages/django/db/backends
download:   SBHS-2018-Rpi-abf599be33b383a6a5baf9493093b2126a622ac8.tar.gz
            SBHS-2018-Rpi-abf599be33b383a6a5baf9493093b2126a622ac8.tar.bz2
            SBHS-2018-Rpi-abf599be33b383a6a5baf9493093b2126a622ac8.zip

added all server files
Diffstat (limited to 'lib/python2.7/site-packages/django/db/backends')
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/__init__.py                           1356
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/creation.py                            489
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/dummy/__init__.py                        0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/dummy/base.py                           73
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/__init__.py                        0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/base.py                          533
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/client.py                         40
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/compiler.py                       37
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/creation.py                       70
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/introspection.py                 119
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/validation.py                     16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/__init__.py                       0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/base.py                         961
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/client.py                        16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/compiler.py                      72
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/creation.py                     277
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/introspection.py                138
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py          0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py            184
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py           23
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py         77
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py   111
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py      222
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py          43
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/signals.py                               3
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py                      0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/base.py                        533
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/client.py                       16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py                     88
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py               185
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/util.py                                179
31 files changed, 5861 insertions, 0 deletions
diff --git a/lib/python2.7/site-packages/django/db/backends/__init__.py b/lib/python2.7/site-packages/django/db/backends/__init__.py
new file mode 100644
index 0000000..12f08a2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/__init__.py
@@ -0,0 +1,1356 @@
+import datetime
+import time
+
+from django.db.utils import DatabaseError
+
+try:
+ from django.utils.six.moves import _thread as thread
+except ImportError:
+ from django.utils.six.moves import _dummy_thread as thread
+from collections import namedtuple
+from contextlib import contextmanager
+
+from django.conf import settings
+from django.db import DEFAULT_DB_ALIAS
+from django.db.backends.signals import connection_created
+from django.db.backends import util
+from django.db.transaction import TransactionManagementError
+from django.db.utils import DatabaseErrorWrapper
+from django.utils.functional import cached_property
+from django.utils.importlib import import_module
+from django.utils import six
+from django.utils import timezone
+
+
+class BaseDatabaseWrapper(object):
+ """
+ Represents a database connection.
+ """
+ ops = None
+ vendor = 'unknown'
+
+ def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
+ allow_thread_sharing=False):
+ # `settings_dict` should be a dictionary containing keys such as
+ # NAME, USER, etc. It's called `settings_dict` instead of `settings`
+ # to disambiguate it from Django settings modules.
+ self.connection = None
+ self.queries = []
+ self.settings_dict = settings_dict
+ self.alias = alias
+ self.use_debug_cursor = None
+
+ # Savepoint management related attributes
+ self.savepoint_state = 0
+
+ # Transaction management related attributes
+ self.autocommit = False
+ self.transaction_state = []
+        # Tracks if the connection is believed to be in a transaction. This
+        # is set somewhat aggressively, as the DBAPI doesn't make it easy to
+        # deduce whether the connection is in a transaction or not.
+ self._dirty = False
+ # Tracks if the connection is in a transaction managed by 'atomic'.
+ self.in_atomic_block = False
+ # List of savepoints created by 'atomic'
+ self.savepoint_ids = []
+ # Tracks if the outermost 'atomic' block should commit on exit,
+        # i.e. if autocommit was active on entry.
+ self.commit_on_exit = True
+ # Tracks if the transaction should be rolled back to the next
+ # available savepoint because of an exception in an inner block.
+ self.needs_rollback = False
+
+ # Connection termination related attributes
+ self.close_at = None
+ self.closed_in_transaction = False
+ self.errors_occurred = False
+
+ # Thread-safety related attributes
+ self.allow_thread_sharing = allow_thread_sharing
+ self._thread_ident = thread.get_ident()
+
+ def __eq__(self, other):
+ return self.alias == other.alias
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self.alias)
+
+ ##### Backend-specific methods for creating connections and cursors #####
+
+ def get_connection_params(self):
+ """Returns a dict of parameters suitable for get_new_connection."""
+ raise NotImplementedError
+
+ def get_new_connection(self, conn_params):
+ """Opens a connection to the database."""
+ raise NotImplementedError
+
+ def init_connection_state(self):
+ """Initializes the database connection settings."""
+ raise NotImplementedError
+
+ def create_cursor(self):
+ """Creates a cursor. Assumes that a connection is established."""
+ raise NotImplementedError
+
+ ##### Backend-specific methods for creating connections #####
+
+ def connect(self):
+ """Connects to the database. Assumes that the connection is closed."""
+ # In case the previous connection was closed while in an atomic block
+ self.in_atomic_block = False
+ self.savepoint_ids = []
+ self.needs_rollback = False
+ # Reset parameters defining when to close the connection
+ max_age = self.settings_dict['CONN_MAX_AGE']
+ self.close_at = None if max_age is None else time.time() + max_age
+ self.closed_in_transaction = False
+ self.errors_occurred = False
+ # Establish the connection
+ conn_params = self.get_connection_params()
+ self.connection = self.get_new_connection(conn_params)
+ self.init_connection_state()
+ if self.settings_dict['AUTOCOMMIT']:
+ self.set_autocommit(True)
+ connection_created.send(sender=self.__class__, connection=self)
+
+ def ensure_connection(self):
+ """
+ Guarantees that a connection to the database is established.
+ """
+ if self.connection is None:
+ with self.wrap_database_errors:
+ self.connect()
+
+ ##### Backend-specific wrappers for PEP-249 connection methods #####
+
+ def _cursor(self):
+ self.ensure_connection()
+ with self.wrap_database_errors:
+ return self.create_cursor()
+
+ def _commit(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.commit()
+
+ def _rollback(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.rollback()
+
+ def _close(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.close()
+
+ ##### Generic wrappers for PEP-249 connection methods #####
+
+ def cursor(self):
+ """
+ Creates a cursor, opening a connection if necessary.
+ """
+ self.validate_thread_sharing()
+ if (self.use_debug_cursor or
+ (self.use_debug_cursor is None and settings.DEBUG)):
+ cursor = self.make_debug_cursor(self._cursor())
+ else:
+ cursor = util.CursorWrapper(self._cursor(), self)
+ return cursor
+
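A minimal usage sketch (illustrative, not part of the committed file) of how
calling code typically reaches cursor(); the 'default' alias comes from
standard Django settings and the query is arbitrary:

    from django.db import connections

    conn = connections['default']        # a BaseDatabaseWrapper subclass
    cursor = conn.cursor()               # connects lazily via ensure_connection()
    cursor.execute("SELECT %s + %s", (1, 2))
    print(cursor.fetchone())             # (3,)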
+ def commit(self):
+ """
+ Commits a transaction and resets the dirty flag.
+ """
+ self.validate_thread_sharing()
+ self.validate_no_atomic_block()
+ self._commit()
+ self.set_clean()
+
+ def rollback(self):
+ """
+ Rolls back a transaction and resets the dirty flag.
+ """
+ self.validate_thread_sharing()
+ self.validate_no_atomic_block()
+ self._rollback()
+ self.set_clean()
+
+ def close(self):
+ """
+ Closes the connection to the database.
+ """
+ self.validate_thread_sharing()
+ # Don't call validate_no_atomic_block() to avoid making it difficult
+ # to get rid of a connection in an invalid state. The next connect()
+ # will reset the transaction state anyway.
+ try:
+ self._close()
+ finally:
+ if self.in_atomic_block:
+ self.closed_in_transaction = True
+ self.needs_rollback = True
+ else:
+ self.connection = None
+ self.set_clean()
+
+ ##### Backend-specific savepoint management methods #####
+
+ def _savepoint(self, sid):
+ self.cursor().execute(self.ops.savepoint_create_sql(sid))
+
+ def _savepoint_rollback(self, sid):
+ self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
+
+ def _savepoint_commit(self, sid):
+ self.cursor().execute(self.ops.savepoint_commit_sql(sid))
+
+ def _savepoint_allowed(self):
+ # Savepoints cannot be created outside a transaction
+ return self.features.uses_savepoints and not self.get_autocommit()
+
+ ##### Generic savepoint management methods #####
+
+ def savepoint(self):
+ """
+ Creates a savepoint inside the current transaction. Returns an
+ identifier for the savepoint that will be used for the subsequent
+ rollback or commit. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ thread_ident = thread.get_ident()
+ tid = str(thread_ident).replace('-', '')
+
+ self.savepoint_state += 1
+ sid = "s%s_x%d" % (tid, self.savepoint_state)
+
+ self.validate_thread_sharing()
+ self._savepoint(sid)
+
+ return sid
+
+ def savepoint_rollback(self, sid):
+ """
+ Rolls back to a savepoint. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ self.validate_thread_sharing()
+ self._savepoint_rollback(sid)
+
+ def savepoint_commit(self, sid):
+ """
+ Releases a savepoint. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ self.validate_thread_sharing()
+ self._savepoint_commit(sid)
+
+ def clean_savepoints(self):
+ """
+ Resets the counter used to generate unique savepoint ids in this thread.
+ """
+ self.savepoint_state = 0
+
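A sketch of the savepoint API above (illustrative; assumes `conn` from the
earlier sketch, a backend with uses_savepoints = True, and a hypothetical
app_item table):

    conn.set_autocommit(False)           # savepoints need an open transaction
    sid = conn.savepoint()               # e.g. 's140735123456_x1'
    try:
        conn.cursor().execute("UPDATE app_item SET qty = qty - 1")
    except Exception:
        conn.savepoint_rollback(sid)     # undo just this step
        raise
    else:
        conn.savepoint_commit(sid)       # release the savepoint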
+ ##### Backend-specific transaction management methods #####
+
+ def _set_autocommit(self, autocommit):
+ """
+ Backend-specific implementation to enable or disable autocommit.
+ """
+ raise NotImplementedError
+
+ ##### Generic transaction management methods #####
+
+ def enter_transaction_management(self, managed=True, forced=False):
+ """
+ Enters transaction management for a running thread. It must be balanced with
+ the appropriate leave_transaction_management call, since the actual state is
+ managed as a stack.
+
+ The state and dirty flag are carried over from the surrounding block or
+ from the settings, if there is no surrounding block (dirty is always false
+ when no current block is running).
+
+ If you switch off transaction management and there is a pending
+        commit/rollback, the data will be committed, unless "forced" is True.
+ """
+ self.validate_no_atomic_block()
+
+ self.transaction_state.append(managed)
+
+ if not managed and self.is_dirty() and not forced:
+ self.commit()
+ self.set_clean()
+
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+
+ def leave_transaction_management(self):
+ """
+ Leaves transaction management for a running thread. A dirty flag is carried
+ over to the surrounding block, as a commit will commit all changes, even
+ those from outside. (Commits are on connection level.)
+ """
+ self.validate_no_atomic_block()
+
+ if self.transaction_state:
+ del self.transaction_state[-1]
+ else:
+ raise TransactionManagementError(
+ "This code isn't under transaction management")
+
+ if self.transaction_state:
+ managed = self.transaction_state[-1]
+ else:
+ managed = not self.settings_dict['AUTOCOMMIT']
+
+ if self._dirty:
+ self.rollback()
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+ raise TransactionManagementError(
+ "Transaction managed block ended with pending COMMIT/ROLLBACK")
+
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+
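Because the transaction state is kept as a stack, enter/leave calls must be
balanced; a sketch (illustrative, reusing `conn` and the hypothetical table
from the sketches above):

    conn.enter_transaction_management(managed=True)
    try:
        conn.cursor().execute("UPDATE app_item SET qty = 0")
        conn.commit()                    # leave the block clean
    finally:
        conn.leave_transaction_management()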
+ def get_autocommit(self):
+ """
+ Check the autocommit state.
+ """
+ self.ensure_connection()
+ return self.autocommit
+
+ def set_autocommit(self, autocommit):
+ """
+ Enable or disable autocommit.
+ """
+ self.validate_no_atomic_block()
+ self.ensure_connection()
+ self._set_autocommit(autocommit)
+ self.autocommit = autocommit
+
+ def get_rollback(self):
+ """
+ Get the "needs rollback" flag -- for *advanced use* only.
+ """
+ if not self.in_atomic_block:
+ raise TransactionManagementError(
+ "The rollback flag doesn't work outside of an 'atomic' block.")
+ return self.needs_rollback
+
+ def set_rollback(self, rollback):
+ """
+ Set or unset the "needs rollback" flag -- for *advanced use* only.
+ """
+ if not self.in_atomic_block:
+ raise TransactionManagementError(
+ "The rollback flag doesn't work outside of an 'atomic' block.")
+ self.needs_rollback = rollback
+
+ def validate_no_atomic_block(self):
+ """
+ Raise an error if an atomic block is active.
+ """
+ if self.in_atomic_block:
+ raise TransactionManagementError(
+ "This is forbidden when an 'atomic' block is active.")
+
+ def validate_no_broken_transaction(self):
+ if self.needs_rollback:
+ raise TransactionManagementError(
+ "An error occurred in the current transaction. You can't "
+ "execute queries until the end of the 'atomic' block.")
+
+ def abort(self):
+ """
+ Roll back any ongoing transaction and clean the transaction state
+ stack.
+ """
+ if self._dirty:
+ self.rollback()
+ while self.transaction_state:
+ self.leave_transaction_management()
+
+ def is_dirty(self):
+ """
+ Returns True if the current transaction requires a commit for changes to
+ happen.
+ """
+ return self._dirty
+
+ def set_dirty(self):
+ """
+        Sets a dirty flag for the current thread and code streak. This can be
+        used to decide, in a managed block of code, whether there are open
+        changes waiting for commit.
+ """
+ if not self.get_autocommit():
+ self._dirty = True
+
+ def set_clean(self):
+ """
+        Resets a dirty flag for the current thread and code streak. This can
+        be used to decide, in a managed block of code, whether a commit or
+        rollback should happen.
+ """
+ self._dirty = False
+ self.clean_savepoints()
+
+ ##### Foreign key constraints checks handling #####
+
+ @contextmanager
+ def constraint_checks_disabled(self):
+ """
+ Context manager that disables foreign key constraint checking.
+ """
+ disabled = self.disable_constraint_checking()
+ try:
+ yield
+ finally:
+ if disabled:
+ self.enable_constraint_checking()
+
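A sketch of the context manager in use (illustrative; table and values are
made up, and on backends whose disable_constraint_checking() returns False
the whole block is effectively a no-op):

    with conn.constraint_checks_disabled():
        conn.cursor().execute("INSERT INTO child (parent_id) VALUES (42)")
    conn.check_constraints()             # backends may re-verify references here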
+ def disable_constraint_checking(self):
+ """
+ Backends can implement as needed to temporarily disable foreign key
+ constraint checking. Should return True if the constraints were
+ disabled and will need to be reenabled.
+ """
+ return False
+
+ def enable_constraint_checking(self):
+ """
+ Backends can implement as needed to re-enable foreign key constraint
+ checking.
+ """
+ pass
+
+ def check_constraints(self, table_names=None):
+ """
+ Backends can override this method if they can apply constraint
+ checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
+ IntegrityError if any invalid foreign key references are encountered.
+ """
+ pass
+
+ ##### Connection termination handling #####
+
+ def is_usable(self):
+ """
+ Tests if the database connection is usable.
+
+ This function may assume that self.connection is not None.
+
+ Actual implementations should take care not to raise exceptions
+ as that may prevent Django from recycling unusable connections.
+ """
+ raise NotImplementedError(
+ "subclasses of BaseDatabaseWrapper may require an is_usable() method")
+
+ def close_if_unusable_or_obsolete(self):
+ """
+ Closes the current connection if unrecoverable errors have occurred,
+ or if it outlived its maximum age.
+ """
+ if self.connection is not None:
+ # If the application didn't restore the original autocommit setting,
+ # don't take chances, drop the connection.
+ if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
+ self.close()
+ return
+
+ if self.errors_occurred:
+ if self.is_usable():
+ self.errors_occurred = False
+ else:
+ self.close()
+ return
+
+ if self.close_at is not None and time.time() >= self.close_at:
+ self.close()
+ return
+
+ ##### Thread safety handling #####
+
+ def validate_thread_sharing(self):
+ """
+        Validates that the connection isn't accessed by a thread other than the
+ one which originally created it, unless the connection was explicitly
+ authorized to be shared between threads (via the `allow_thread_sharing`
+ property). Raises an exception if the validation fails.
+ """
+ if not (self.allow_thread_sharing
+ or self._thread_ident == thread.get_ident()):
+ raise DatabaseError("DatabaseWrapper objects created in a "
+ "thread can only be used in that same thread. The object "
+ "with alias '%s' was created in thread id %s and this is "
+ "thread id %s."
+ % (self.alias, self._thread_ident, thread.get_ident()))
+
+ ##### Miscellaneous #####
+
+ @cached_property
+ def wrap_database_errors(self):
+ """
+ Context manager and decorator that re-throws backend-specific database
+ exceptions using Django's common wrappers.
+ """
+ return DatabaseErrorWrapper(self)
+
+ def make_debug_cursor(self, cursor):
+ """
+ Creates a cursor that logs all queries in self.queries.
+ """
+ return util.CursorDebugWrapper(cursor, self)
+
+ @contextmanager
+ def temporary_connection(self):
+ """
+ Context manager that ensures that a connection is established, and
+ if it opened one, closes it to avoid leaving a dangling connection.
+ This is useful for operations outside of the request-response cycle.
+
+ Provides a cursor: with self.temporary_connection() as cursor: ...
+ """
+ must_close = self.connection is None
+ cursor = self.cursor()
+ try:
+ yield cursor
+ finally:
+ cursor.close()
+ if must_close:
+ self.close()
+
+ def _start_transaction_under_autocommit(self):
+ """
+ Only required when autocommits_when_autocommit_is_off = True.
+ """
+ raise NotImplementedError
+
+
+class BaseDatabaseFeatures(object):
+ allows_group_by_pk = False
+    # True if django.db.backends.util.typecast_timestamp is used on values
+    # returned from dates() calls.
+ needs_datetime_string_cast = True
+ empty_fetchmany_value = []
+ update_can_self_select = True
+
+ # Does the backend distinguish between '' and None?
+ interprets_empty_strings_as_nulls = False
+
+ # Does the backend allow inserting duplicate rows when a unique_together
+ # constraint exists, but one of the unique_together columns is NULL?
+ ignores_nulls_in_unique_constraints = True
+
+ can_use_chunked_reads = True
+ can_return_id_from_insert = False
+ has_bulk_insert = False
+ uses_savepoints = False
+ can_combine_inserts_with_and_without_auto_increment_pk = False
+
+ # If True, don't use integer foreign keys referring to, e.g., positive
+ # integer primary keys.
+ related_fields_match_type = False
+ allow_sliced_subqueries = True
+ has_select_for_update = False
+ has_select_for_update_nowait = False
+
+ supports_select_related = True
+
+ # Does the default test database allow multiple connections?
+ # Usually an indication that the test database is in-memory
+ test_db_allows_multiple_connections = True
+
+ # Can an object be saved without an explicit primary key?
+ supports_unspecified_pk = False
+
+ # Can a fixture contain forward references? i.e., are
+    # FK constraints checked at the end of the transaction, or
+ # at the end of each save operation?
+ supports_forward_references = True
+
+ # Does a dirty transaction need to be rolled back
+ # before the cursor can be used again?
+ requires_rollback_on_dirty_transaction = False
+
+ # Does the backend allow very long model names without error?
+ supports_long_model_names = True
+
+ # Is there a REAL datatype in addition to floats/doubles?
+ has_real_datatype = False
+ supports_subqueries_in_group_by = True
+ supports_bitwise_or = True
+
+ # Do time/datetime fields have microsecond precision?
+ supports_microsecond_precision = True
+
+ # Does the __regex lookup support backreferencing and grouping?
+ supports_regex_backreferencing = True
+
+ # Can date/datetime lookups be performed using a string?
+ supports_date_lookup_using_string = True
+
+ # Can datetimes with timezones be used?
+ supports_timezones = True
+
+ # Does the database have a copy of the zoneinfo database?
+ has_zoneinfo_database = True
+
+ # When performing a GROUP BY, is an ORDER BY NULL required
+ # to remove any ordering?
+ requires_explicit_null_ordering_when_grouping = False
+
+ # Is there a 1000 item limit on query parameters?
+ supports_1000_query_parameters = True
+
+ # Can an object have a primary key of 0? MySQL says No.
+ allows_primary_key_0 = True
+
+ # Do we need to NULL a ForeignKey out, or can the constraint check be
+    # deferred?
+ can_defer_constraint_checks = False
+
+ # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
+ supports_mixed_date_datetime_comparisons = True
+
+ # Does the backend support tablespaces? Default to False because it isn't
+ # in the SQL standard.
+ supports_tablespaces = False
+
+ # Does the backend reset sequences between tests?
+ supports_sequence_reset = True
+
+ # Confirm support for introspected foreign keys
+ # Every database can do this reliably, except MySQL,
+ # which can't do it for MyISAM tables
+ can_introspect_foreign_keys = True
+
+ # Support for the DISTINCT ON clause
+ can_distinct_on_fields = False
+
+ # Does the backend decide to commit before SAVEPOINT statements
+ # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
+ autocommits_when_autocommit_is_off = False
+
+ # Does the backend prevent running SQL queries in broken transactions?
+ atomic_transactions = True
+
+ # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
+ # parameter passing? Note this can be provided by the backend even if not
+ # supported by the Python driver
+ supports_paramstyle_pyformat = True
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ @cached_property
+ def supports_transactions(self):
+ "Confirm support for transactions"
+ try:
+ # Make sure to run inside a managed transaction block,
+            # otherwise autocommit will cause the confirmation to
+ # fail.
+ self.connection.enter_transaction_management()
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
+ self.connection.commit()
+ cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
+ self.connection.rollback()
+ cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
+ count, = cursor.fetchone()
+ cursor.execute('DROP TABLE ROLLBACK_TEST')
+ self.connection.commit()
+ finally:
+ self.connection.leave_transaction_management()
+ return count == 0
+
+ @cached_property
+ def supports_stddev(self):
+ "Confirm support for STDDEV and related stats functions"
+ class StdDevPop(object):
+ sql_function = 'STDDEV_POP'
+
+ try:
+ self.connection.ops.check_aggregate_support(StdDevPop())
+ return True
+ except NotImplementedError:
+ return False
+
+
+class BaseDatabaseOperations(object):
+ """
+ This class encapsulates all backend-specific differences, such as the way
+ a backend performs ordering or calculates the ID of a recently-inserted
+ row.
+ """
+ compiler_module = "django.db.models.sql.compiler"
+
+ def __init__(self, connection):
+ self.connection = connection
+ self._cache = None
+
+ def autoinc_sql(self, table, column):
+ """
+ Returns any SQL needed to support auto-incrementing primary keys, or
+ None if no SQL is necessary.
+
+ This SQL is executed when a table is created.
+ """
+ return None
+
+ def bulk_batch_size(self, fields, objs):
+ """
+ Returns the maximum allowed batch size for the backend. The fields
+ are the fields going to be inserted in the batch, the objs contains
+ all the objects to be inserted.
+ """
+ return len(objs)
+
+ def cache_key_culling_sql(self):
+ """
+ Returns an SQL query that retrieves the first cache key greater than the
+ n smallest.
+
+ This is used by the 'db' cache backend to determine where to start
+ culling.
+ """
+ return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
+
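A sketch of how a caller such as the 'db' cache backend can consume this
template: the table name is interpolated first, then the cull offset is
passed as a query parameter (table name and offset are illustrative):

    ops = conn.ops
    sql = ops.cache_key_culling_sql() % 'my_cache_table'
    cursor.execute(sql, [50])
    last_key = cursor.fetchone()[0]
    cursor.execute("DELETE FROM my_cache_table WHERE cache_key < %s", [last_key])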
+ def date_extract_sql(self, lookup_type, field_name):
+ """
+ Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
+ extracts a value from the given date field field_name.
+ """
+ raise NotImplementedError()
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements the date interval functionality for expressions
+ """
+ raise NotImplementedError()
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ """
+ Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
+ truncates the given date field field_name to a date object with only
+ the given specificity.
+ """
+ raise NotImplementedError()
+
+ def datetime_cast_sql(self):
+ """
+ Returns the SQL necessary to cast a datetime value so that it will be
+ retrieved as a Python datetime object instead of a string.
+
+ This SQL should include a '%s' in place of the field's name.
+ """
+ return "%s"
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ """
+ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
+ 'second', returns the SQL that extracts a value from the given
+ datetime field field_name, and a tuple of parameters.
+ """
+ raise NotImplementedError()
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ """
+ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
+ 'second', returns the SQL that truncates the given datetime field
+ field_name to a datetime object with only the given specificity, and
+ a tuple of parameters.
+ """
+ raise NotImplementedError()
+
+ def deferrable_sql(self):
+ """
+ Returns the SQL necessary to make a constraint "initially deferred"
+ during a CREATE TABLE statement.
+ """
+ return ''
+
+ def distinct_sql(self, fields):
+ """
+ Returns an SQL DISTINCT clause which removes duplicate rows from the
+ result set. If any fields are given, only the given fields are being
+ checked for duplicates.
+ """
+ if fields:
+ raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
+ else:
+ return 'DISTINCT'
+
+ def drop_foreignkey_sql(self):
+ """
+ Returns the SQL command that drops a foreign key.
+ """
+ return "DROP CONSTRAINT"
+
+ def drop_sequence_sql(self, table):
+ """
+ Returns any SQL necessary to drop the sequence for the given table.
+ Returns None if no SQL is necessary.
+ """
+ return None
+
+ def fetch_returned_insert_id(self, cursor):
+ """
+ Given a cursor object that has just performed an INSERT...RETURNING
+ statement into a table that has an auto-incrementing ID, returns the
+ newly created ID.
+ """
+ return cursor.fetchone()[0]
+
+ def field_cast_sql(self, db_type, internal_type):
+ """
+ Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
+ (e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
+ before using it in a WHERE statement. Note that the resulting string
+ should contain a '%s' placeholder for the column being searched against.
+ """
+ return '%s'
+
+ def force_no_ordering(self):
+ """
+ Returns a list used in the "ORDER BY" clause to force no ordering at
+ all. Returning an empty list means that nothing will be included in the
+ ordering.
+ """
+ return []
+
+ def for_update_sql(self, nowait=False):
+ """
+ Returns the FOR UPDATE SQL clause to lock rows for an update operation.
+ """
+ if nowait:
+ return 'FOR UPDATE NOWAIT'
+ else:
+ return 'FOR UPDATE'
+
+ def fulltext_search_sql(self, field_name):
+ """
+ Returns the SQL WHERE clause to use in order to perform a full-text
+ search of the given field_name. Note that the resulting string should
+ contain a '%s' placeholder for the value being searched against.
+ """
+ raise NotImplementedError('Full-text search is not implemented for this database backend')
+
+ def last_executed_query(self, cursor, sql, params):
+ """
+ Returns a string of the query last executed by the given cursor, with
+ placeholders replaced with actual values.
+
+ `sql` is the raw query containing placeholders, and `params` is the
+ sequence of parameters. These are used by default, but this method
+ exists for database backends to provide a better implementation
+ according to their own quoting schemes.
+ """
+ from django.utils.encoding import force_text
+
+ # Convert params to contain Unicode values.
+ to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
+ if isinstance(params, (list, tuple)):
+ u_params = tuple(to_unicode(val) for val in params)
+ elif params is None:
+ u_params = ()
+ else:
+ u_params = dict((to_unicode(k), to_unicode(v)) for k, v in params.items())
+
+ return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
+
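With the default implementation, a call like this (sketch) produces:

    ops.last_executed_query(cursor, "SELECT * FROM t WHERE id = %s", [7])
    # "QUERY = 'SELECT * FROM t WHERE id = %s' - PARAMS = (7,)"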
+ def last_insert_id(self, cursor, table_name, pk_name):
+ """
+ Given a cursor object that has just performed an INSERT statement into
+ a table that has an auto-incrementing ID, returns the newly created ID.
+
+ This method also receives the table name and the name of the primary-key
+ column.
+ """
+ return cursor.lastrowid
+
+ def lookup_cast(self, lookup_type):
+ """
+ Returns the string to use in a query when performing lookups
+ ("contains", "like", etc). The resulting string should contain a '%s'
+ placeholder for the column being searched against.
+ """
+ return "%s"
+
+ def max_in_list_size(self):
+ """
+ Returns the maximum number of items that can be passed in a single 'IN'
+ list condition, or None if the backend does not impose a limit.
+ """
+ return None
+
+ def max_name_length(self):
+ """
+ Returns the maximum length of table and column names, or None if there
+ is no limit.
+ """
+ return None
+
+ def no_limit_value(self):
+ """
+        Returns the value to use for the LIMIT when we want "LIMIT
+ infinity". Returns None if the limit clause can be omitted in this case.
+ """
+ raise NotImplementedError
+
+ def pk_default_value(self):
+ """
+ Returns the value to use during an INSERT statement to specify that
+ the field should use its default value.
+ """
+ return 'DEFAULT'
+
+ def process_clob(self, value):
+ """
+ Returns the value of a CLOB column, for backends that return a locator
+ object that requires additional processing.
+ """
+ return value
+
+ def return_insert_id(self):
+ """
+ For backends that support returning the last insert ID as part
+ of an insert query, this method returns the SQL and params to
+ append to the INSERT query. The returned fragment should
+ contain a format string to hold the appropriate column.
+ """
+ pass
+
+ def compiler(self, compiler_name):
+ """
+ Returns the SQLCompiler class corresponding to the given name,
+ in the namespace corresponding to the `compiler_module` attribute
+ on this backend.
+ """
+ if self._cache is None:
+ self._cache = import_module(self.compiler_module)
+ return getattr(self._cache, compiler_name)
+
+ def quote_name(self, name):
+ """
+ Returns a quoted version of the given table, index or column name. Does
+ not quote the given name if it's already been quoted.
+ """
+ raise NotImplementedError()
+
+ def random_function_sql(self):
+ """
+ Returns an SQL expression that returns a random value.
+ """
+ return 'RANDOM()'
+
+ def regex_lookup(self, lookup_type):
+ """
+ Returns the string to use in a query when performing regular expression
+ lookups (using "regex" or "iregex"). The resulting string should
+ contain a '%s' placeholder for the column being searched against.
+
+ If the feature is not supported (or part of it is not supported), a
+ NotImplementedError exception can be raised.
+ """
+ raise NotImplementedError
+
+ def savepoint_create_sql(self, sid):
+ """
+ Returns the SQL for starting a new savepoint. Only required if the
+ "uses_savepoints" feature is True. The "sid" parameter is a string
+ for the savepoint id.
+ """
+ return "SAVEPOINT %s" % self.quote_name(sid)
+
+ def savepoint_commit_sql(self, sid):
+ """
+ Returns the SQL for committing the given savepoint.
+ """
+ return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
+
+ def savepoint_rollback_sql(self, sid):
+ """
+ Returns the SQL for rolling back the given savepoint.
+ """
+ return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
+
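A toy subclass (illustrative only) showing the SQL these three methods
compose; real backends provide their own quote_name():

    class DemoOps(BaseDatabaseOperations):
        def quote_name(self, name):
            return '"%s"' % name

    demo = DemoOps(connection=None)
    demo.savepoint_create_sql("s1_x1")    # 'SAVEPOINT "s1_x1"'
    demo.savepoint_commit_sql("s1_x1")    # 'RELEASE SAVEPOINT "s1_x1"'
    demo.savepoint_rollback_sql("s1_x1")  # 'ROLLBACK TO SAVEPOINT "s1_x1"'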
+ def set_time_zone_sql(self):
+ """
+ Returns the SQL that will set the connection's time zone.
+
+ Returns '' if the backend doesn't support time zones.
+ """
+ return ''
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ """
+ Returns a list of SQL statements required to remove all data from
+ the given database tables (without actually removing the tables
+ themselves).
+
+ The returned value also includes SQL statements required to reset DB
+ sequences passed in :param sequences:.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+
+ The `allow_cascade` argument determines whether truncation may cascade
+        to tables with foreign keys pointing to the tables being truncated.
+ PostgreSQL requires a cascade even if these tables are empty.
+ """
+ raise NotImplementedError()
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ """
+ Returns a list of the SQL statements required to reset sequences
+ passed in :param sequences:.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+ """
+ return []
+
+ def sequence_reset_sql(self, style, model_list):
+ """
+ Returns a list of the SQL statements required to reset sequences for
+ the given models.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+ """
+ return [] # No sequence reset required by default.
+
+ def start_transaction_sql(self):
+ """
+ Returns the SQL statement required to start a transaction.
+ """
+ return "BEGIN;"
+
+ def end_transaction_sql(self, success=True):
+ """
+ Returns the SQL statement required to end a transaction.
+ """
+ if not success:
+ return "ROLLBACK;"
+ return "COMMIT;"
+
+ def tablespace_sql(self, tablespace, inline=False):
+ """
+ Returns the SQL that will be used in a query to define the tablespace.
+
+ Returns '' if the backend doesn't support tablespaces.
+
+ If inline is True, the SQL is appended to a row; otherwise it's appended
+ to the entire CREATE TABLE or CREATE INDEX statement.
+ """
+ return ''
+
+ def prep_for_like_query(self, x):
+ """Prepares a value for use in a LIKE query."""
+ from django.utils.encoding import force_text
+ return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
+
+ # Same as prep_for_like_query(), but called for "iexact" matches, which
+ # need not necessarily be implemented using "LIKE" in the backend.
+ prep_for_iexact_query = prep_for_like_query
+
+ def validate_autopk_value(self, value):
+ """
+ Certain backends do not accept some values for "serial" fields
+ (for example zero in MySQL). This method will raise a ValueError
+ if the value is invalid, otherwise returns validated value.
+ """
+ return value
+
+ def value_to_db_date(self, value):
+ """
+ Transform a date value to an object compatible with what is expected
+ by the backend driver for date columns.
+ """
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def value_to_db_datetime(self, value):
+ """
+ Transform a datetime value to an object compatible with what is expected
+ by the backend driver for datetime columns.
+ """
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ """
+ Transform a time value to an object compatible with what is expected
+ by the backend driver for time columns.
+ """
+ if value is None:
+ return None
+ if timezone.is_aware(value):
+ raise ValueError("Django does not support timezone-aware times.")
+ return six.text_type(value)
+
+ def value_to_db_decimal(self, value, max_digits, decimal_places):
+ """
+ Transform a decimal.Decimal value to an object compatible with what is
+ expected by the backend driver for decimal (numeric) columns.
+ """
+ if value is None:
+ return None
+ return util.format_number(value, max_digits, decimal_places)
+
+ def year_lookup_bounds_for_date_field(self, value):
+ """
+        Returns a two-element list with the lower and upper bound to be used
+ with a BETWEEN operator to query a DateField value using a year
+ lookup.
+
+ `value` is an int, containing the looked-up year.
+ """
+ first = datetime.date(value, 1, 1)
+ second = datetime.date(value, 12, 31)
+ return [first, second]
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ """
+        Returns a two-element list with the lower and upper bound to be used
+ with a BETWEEN operator to query a DateTimeField value using a year
+ lookup.
+
+ `value` is an int, containing the looked-up year.
+ """
+ first = datetime.datetime(value, 1, 1)
+ second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
+ if settings.USE_TZ:
+ tz = timezone.get_current_timezone()
+ first = timezone.make_aware(first, tz)
+ second = timezone.make_aware(second, tz)
+ return [first, second]
+
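A sketch of the bounds both methods return for a lookup on 2013, with
USE_TZ = False (so the datetimes stay naive):

    ops.year_lookup_bounds_for_date_field(2013)
    # [datetime.date(2013, 1, 1), datetime.date(2013, 12, 31)]
    ops.year_lookup_bounds_for_datetime_field(2013)
    # [datetime.datetime(2013, 1, 1, 0, 0),
    #  datetime.datetime(2013, 12, 31, 23, 59, 59, 999999)]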
+ def convert_values(self, value, field):
+ """
+ Coerce the value returned by the database backend into a consistent type
+ that is compatible with the field type.
+ """
+ if value is None or field is None:
+ return value
+ internal_type = field.get_internal_type()
+ if internal_type == 'FloatField':
+ return float(value)
+ elif (internal_type and (internal_type.endswith('IntegerField')
+ or internal_type == 'AutoField')):
+ return int(value)
+ return value
+
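A sketch of the coercions above, using stock model fields:

    from django.db.models import FloatField, IntegerField

    ops.convert_values("3.25", FloatField())   # 3.25
    ops.convert_values("7", IntegerField())    # 7 (any *IntegerField or AutoField)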
+ def check_aggregate_support(self, aggregate_func):
+ """Check that the backend supports the provided aggregate
+
+ This is used on specific backends to rule out known aggregates
+ that are known to have faulty implementations. If the named
+ aggregate function has a known problem, the backend should
+ raise NotImplementedError.
+ """
+ pass
+
+ def combine_expression(self, connector, sub_expressions):
+ """Combine a list of subexpressions into a single expression, using
+ the provided connecting operator. This is required because operators
+ can vary between backends (e.g., Oracle with %% and &) and between
+ subexpression types (e.g., date expressions)
+ """
+ conn = ' %s ' % connector
+ return conn.join(sub_expressions)
+
+ def modify_insert_params(self, placeholders, params):
+ """Allow modification of insert parameters. Needed for Oracle Spatial
+ backend due to #10888.
+ """
+ return params
+
+
+# Structure returned by the DB-API cursor.description interface (PEP 249)
+FieldInfo = namedtuple('FieldInfo',
+ 'name type_code display_size internal_size precision scale null_ok'
+)
+
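A sketch of wrapping one cursor.description entry (the tuple values are
invented):

    desc = ('id', 23, None, 4, None, None, 0)  # one PEP 249 description row
    info = FieldInfo(*desc)
    info.name, info.null_ok                    # ('id', 0)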
+class BaseDatabaseIntrospection(object):
+ """
+ This class encapsulates all backend-specific introspection utilities
+ """
+ data_types_reverse = {}
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ def get_field_type(self, data_type, description):
+ """Hook for a database backend to use the cursor description to
+ match a Django field type to a database column.
+
+ For Oracle, the column data_type on its own is insufficient to
+ distinguish between a FloatField and IntegerField, for example."""
+ return self.data_types_reverse[data_type]
+
+ def table_name_converter(self, name):
+ """Apply a conversion to the name for the purposes of comparison.
+
+ The default table name converter is for case sensitive comparison.
+ """
+ return name
+
+ def table_names(self, cursor=None):
+ """
+ Returns a list of names of all tables that exist in the database.
+ The returned table list is sorted by Python's default sorting. We
+        do NOT use the database's ORDER BY here to avoid subtle differences
+ in sorting order between databases.
+ """
+ if cursor is None:
+ cursor = self.connection.cursor()
+ return sorted(self.get_table_list(cursor))
+
+ def get_table_list(self, cursor):
+ """
+ Returns an unsorted list of names of all tables that exist in the
+ database.
+ """
+ raise NotImplementedError
+
+ def django_table_names(self, only_existing=False):
+ """
+ Returns a list of all table names that have associated Django models and
+ are in INSTALLED_APPS.
+
+ If only_existing is True, the resulting list will only include the tables
+ that actually exist in the database.
+ """
+ from django.db import models, router
+ tables = set()
+ for app in models.get_apps():
+ for model in models.get_models(app):
+ if not model._meta.managed:
+ continue
+ if not router.allow_syncdb(self.connection.alias, model):
+ continue
+ tables.add(model._meta.db_table)
+ tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
+ tables = list(tables)
+ if only_existing:
+ existing_tables = self.table_names()
+ tables = [
+ t
+ for t in tables
+ if self.table_name_converter(t) in existing_tables
+ ]
+ return tables
+
+ def installed_models(self, tables):
+ "Returns a set of all models represented by the provided list of table names."
+ from django.db import models, router
+ all_models = []
+ for app in models.get_apps():
+ for model in models.get_models(app):
+ if router.allow_syncdb(self.connection.alias, model):
+ all_models.append(model)
+ tables = list(map(self.table_name_converter, tables))
+ return set([
+ m for m in all_models
+ if self.table_name_converter(m._meta.db_table) in tables
+ ])
+
+ def sequence_list(self):
+ "Returns a list of information about all DB sequences for all models in all apps."
+ from django.db import models, router
+
+ apps = models.get_apps()
+ sequence_list = []
+
+ for app in apps:
+ for model in models.get_models(app):
+ if not model._meta.managed:
+ continue
+ if model._meta.swapped:
+ continue
+ if not router.allow_syncdb(self.connection.alias, model):
+ continue
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ sequence_list.append({'table': model._meta.db_table, 'column': f.column})
+ break # Only one AutoField is allowed per model, so don't bother continuing.
+
+ for f in model._meta.local_many_to_many:
+ # If this is an m2m using an intermediate table,
+ # we don't need to reset the sequence.
+ if f.rel.through is None:
+ sequence_list.append({'table': f.m2m_db_table(), 'column': None})
+
+ return sequence_list
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Backends can override this to return a list of (column_name, referenced_table_name,
+ referenced_column_name) for all key columns in given table.
+ """
+ raise NotImplementedError
+
+ def get_primary_key_column(self, cursor, table_name):
+ """
+ Returns the name of the primary key column for the given table.
+ """
+ for column in six.iteritems(self.get_indexes(cursor, table_name)):
+ if column[1]['primary_key']:
+ return column[0]
+ return None
+
+ def get_indexes(self, cursor, table_name):
+ """
+ Returns a dictionary of indexed fieldname -> infodict for the given
+ table, where each infodict is in the format:
+ {'primary_key': boolean representing whether it's the primary key,
+ 'unique': boolean representing whether it's a unique index}
+
+ Only single-column indexes are introspected.
+ """
+ raise NotImplementedError
+
+
+class BaseDatabaseClient(object):
+ """
+ This class encapsulates all backend-specific methods for opening a
+ client shell.
+ """
+ # This should be a string representing the name of the executable
+ # (e.g., "psql"). Subclasses must override this.
+ executable_name = None
+
+ def __init__(self, connection):
+ # connection is an instance of BaseDatabaseWrapper.
+ self.connection = connection
+
+ def runshell(self):
+ raise NotImplementedError()
+
+
+class BaseDatabaseValidation(object):
+ """
+    This class encapsulates all backend-specific model validation.
+ """
+ def __init__(self, connection):
+ self.connection = connection
+
+ def validate_field(self, errors, opts, f):
+ "By default, there is no backend-specific validation"
+ pass
diff --git a/lib/python2.7/site-packages/django/db/backends/creation.py b/lib/python2.7/site-packages/django/db/backends/creation.py
new file mode 100644
index 0000000..bae439b
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/creation.py
@@ -0,0 +1,489 @@
+import hashlib
+import sys
+import time
+import warnings
+
+from django.conf import settings
+from django.db.utils import load_backend
+from django.utils.encoding import force_bytes
+from django.utils.six.moves import input
+
+from .util import truncate_name
+
+# The prefix to put on the default database name when creating
+# the test database.
+TEST_DATABASE_PREFIX = 'test_'
+
+
+class BaseDatabaseCreation(object):
+ """
+ This class encapsulates all backend-specific differences that pertain to
+ database *creation*, such as the column types to use for particular Django
+ Fields, the SQL used to create and destroy tables, and the creation and
+ destruction of test databases.
+ """
+ data_types = {}
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ def _digest(self, *args):
+ """
+ Generates a 32-bit digest of a set of arguments that can be used to
+ shorten identifying names.
+ """
+ h = hashlib.md5()
+ for arg in args:
+ h.update(force_bytes(arg))
+ return h.hexdigest()[:8]
+
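A sketch of the helper (table names arbitrary); the result is a stable
8-character hex prefix of the md5 of the arguments:

    creation = BaseDatabaseCreation(connection=None)
    creation._digest('tbl_a', 'tbl_b')         # e.g. an 8-char hex string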
+ def sql_create_model(self, model, style, known_models=set()):
+ """
+ Returns the SQL required to create a single model, as a tuple of:
+ (list_of_sql, pending_references_dict)
+ """
+ opts = model._meta
+ if not opts.managed or opts.proxy or opts.swapped:
+ return [], {}
+ final_output = []
+ table_output = []
+ pending_references = {}
+ qn = self.connection.ops.quote_name
+ for f in opts.local_fields:
+ col_type = f.db_type(connection=self.connection)
+ tablespace = f.db_tablespace or opts.db_tablespace
+ if col_type is None:
+ # Skip ManyToManyFields, because they're not represented as
+ # database columns in this table.
+ continue
+ # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
+ field_output = [style.SQL_FIELD(qn(f.column)),
+ style.SQL_COLTYPE(col_type)]
+ # Oracle treats the empty string ('') as null, so coerce the null
+ # option whenever '' is a possible value.
+ null = f.null
+ if (f.empty_strings_allowed and not f.primary_key and
+ self.connection.features.interprets_empty_strings_as_nulls):
+ null = True
+ if not null:
+ field_output.append(style.SQL_KEYWORD('NOT NULL'))
+ if f.primary_key:
+ field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
+ elif f.unique:
+ field_output.append(style.SQL_KEYWORD('UNIQUE'))
+ if tablespace and f.unique:
+ # We must specify the index tablespace inline, because we
+ # won't be generating a CREATE INDEX statement for this field.
+ tablespace_sql = self.connection.ops.tablespace_sql(
+ tablespace, inline=True)
+ if tablespace_sql:
+ field_output.append(tablespace_sql)
+ if f.rel and f.db_constraint:
+ ref_output, pending = self.sql_for_inline_foreign_key_references(
+ model, f, known_models, style)
+ if pending:
+ pending_references.setdefault(f.rel.to, []).append(
+ (model, f))
+ else:
+ field_output.extend(ref_output)
+ table_output.append(' '.join(field_output))
+ for field_constraints in opts.unique_together:
+ table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
+ ", ".join(
+ [style.SQL_FIELD(qn(opts.get_field(f).column))
+ for f in field_constraints]))
+
+ full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
+ style.SQL_TABLE(qn(opts.db_table)) + ' (']
+ for i, line in enumerate(table_output): # Combine and add commas.
+ full_statement.append(
+ ' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
+ full_statement.append(')')
+ if opts.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(
+ opts.db_tablespace)
+ if tablespace_sql:
+ full_statement.append(tablespace_sql)
+ full_statement.append(';')
+ final_output.append('\n'.join(full_statement))
+
+ if opts.has_auto_field:
+ # Add any extra SQL needed to support auto-incrementing primary
+ # keys.
+ auto_column = opts.auto_field.db_column or opts.auto_field.name
+ autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
+ auto_column)
+ if autoinc_sql:
+ for stmt in autoinc_sql:
+ final_output.append(stmt)
+
+ return final_output, pending_references
+
+ def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
+ """
+ Return the SQL snippet defining the foreign key reference for a field.
+ """
+ qn = self.connection.ops.quote_name
+ rel_to = field.rel.to
+ if rel_to in known_models or rel_to == model:
+ output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
+ style.SQL_TABLE(qn(rel_to._meta.db_table)) + ' (' +
+ style.SQL_FIELD(qn(rel_to._meta.get_field(
+ field.rel.field_name).column)) + ')' +
+ self.connection.ops.deferrable_sql()
+ ]
+ pending = False
+ else:
+ # We haven't yet created the table to which this field
+ # is related, so save it for later.
+ output = []
+ pending = True
+
+ return output, pending
+
+ def sql_for_pending_references(self, model, style, pending_references):
+ """
+ Returns any ALTER TABLE statements to add constraints after the fact.
+ """
+ opts = model._meta
+ if not opts.managed or opts.swapped:
+ return []
+ qn = self.connection.ops.quote_name
+ final_output = []
+ if model in pending_references:
+ for rel_class, f in pending_references[model]:
+ rel_opts = rel_class._meta
+ r_table = rel_opts.db_table
+ r_col = f.column
+ table = opts.db_table
+ col = opts.get_field(f.rel.field_name).column
+ # For MySQL, r_name must be unique in the first 64 characters.
+ # So we are careful with character usage here.
+ r_name = '%s_refs_%s_%s' % (
+ r_col, col, self._digest(r_table, table))
+ final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
+ ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
+ (qn(r_table), qn(truncate_name(
+ r_name, self.connection.ops.max_name_length())),
+ qn(r_col), qn(table), qn(col),
+ self.connection.ops.deferrable_sql()))
+ del pending_references[model]
+ return final_output
+
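The statements built above take roughly this shape (names and digest suffix
invented; deferrable_sql() is backend-dependent and often empty):

    # ALTER TABLE "child" ADD CONSTRAINT "parent_id_refs_id_1a2b3c4d"
    #     FOREIGN KEY ("parent_id") REFERENCES "parent" ("id");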
+ def sql_indexes_for_model(self, model, style):
+ """
+ Returns the CREATE INDEX SQL statements for a single model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ for f in model._meta.local_fields:
+ output.extend(self.sql_indexes_for_field(model, f, style))
+ for fs in model._meta.index_together:
+ fields = [model._meta.get_field_by_name(f)[0] for f in fs]
+ output.extend(self.sql_indexes_for_fields(model, fields, style))
+ return output
+
+ def sql_indexes_for_field(self, model, f, style):
+ """
+ Return the CREATE INDEX SQL statements for a single model field.
+ """
+ if f.db_index and not f.unique:
+ return self.sql_indexes_for_fields(model, [f], style)
+ else:
+ return []
+
+ def sql_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ return [
+ style.SQL_KEYWORD("CREATE INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ style.SQL_KEYWORD("ON") + " " +
+ style.SQL_TABLE(qn(model._meta.db_table)) + " " +
+ "(%s)" % style.SQL_FIELD(", ".join(field_names)) +
+ "%s;" % tablespace_sql,
+ ]
+
+ def sql_destroy_model(self, model, references_to_delete, style):
+ """
+        Return the DROP TABLE and constraint-dropping statements for a single
+ model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ # Drop the table now
+ qn = self.connection.ops.quote_name
+ output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
+ style.SQL_TABLE(qn(model._meta.db_table)))]
+ if model in references_to_delete:
+ output.extend(self.sql_remove_table_constraints(
+ model, references_to_delete, style))
+ if model._meta.has_auto_field:
+ ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
+ if ds:
+ output.append(ds)
+ return output
+
+ def sql_remove_table_constraints(self, model, references_to_delete, style):
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ qn = self.connection.ops.quote_name
+ for rel_class, f in references_to_delete[model]:
+ table = rel_class._meta.db_table
+ col = f.column
+ r_table = model._meta.db_table
+ r_col = model._meta.get_field(f.rel.field_name).column
+ r_name = '%s_refs_%s_%s' % (
+ col, r_col, self._digest(table, r_table))
+ output.append('%s %s %s %s;' % \
+ (style.SQL_KEYWORD('ALTER TABLE'),
+ style.SQL_TABLE(qn(table)),
+ style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
+ style.SQL_FIELD(qn(truncate_name(
+ r_name, self.connection.ops.max_name_length())))))
+ del references_to_delete[model]
+ return output
+
+ def sql_destroy_indexes_for_model(self, model, style):
+ """
+ Returns the DROP INDEX SQL statements for a single model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ for f in model._meta.local_fields:
+ output.extend(self.sql_destroy_indexes_for_field(model, f, style))
+ for fs in model._meta.index_together:
+ fields = [model._meta.get_field_by_name(f)[0] for f in fs]
+ output.extend(self.sql_destroy_indexes_for_fields(model, fields, style))
+ return output
+
+ def sql_destroy_indexes_for_field(self, model, f, style):
+ """
+ Return the DROP INDEX SQL statements for a single model field.
+ """
+ if f.db_index and not f.unique:
+ return self.sql_destroy_indexes_for_fields(model, [f], style)
+ else:
+ return []
+
+ def sql_destroy_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ return [
+ style.SQL_KEYWORD("DROP INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ ";",
+ ]
+
+ def create_test_db(self, verbosity=1, autoclobber=False):
+ """
+ Creates a test database, prompting the user for confirmation if the
+ database already exists. Returns the name of the test database created.
+ """
+ # Don't import django.core.management if it isn't needed.
+ from django.core.management import call_command
+
+ test_database_name = self._get_test_db_name()
+
+ if verbosity >= 1:
+ test_db_repr = ''
+ if verbosity >= 2:
+ test_db_repr = " ('%s')" % test_database_name
+ print("Creating test database for alias '%s'%s..." % (
+ self.connection.alias, test_db_repr))
+
+ self._create_test_db(verbosity, autoclobber)
+
+ self.connection.close()
+ settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
+ self.connection.settings_dict["NAME"] = test_database_name
+
+ # Report syncdb messages at one level lower than that requested.
+ # This ensures we don't get flooded with messages during testing
+ # (unless you really ask to be flooded)
+ call_command('syncdb',
+ verbosity=max(verbosity - 1, 0),
+ interactive=False,
+ database=self.connection.alias,
+ load_initial_data=False)
+
+ # We need to then do a flush to ensure that any data installed by
+ # custom SQL has been removed. The only test data should come from
+ # test fixtures, or autogenerated from post_syncdb triggers.
+ # This has the side effect of loading initial data (which was
+ # intentionally skipped in the syncdb).
+ call_command('flush',
+ verbosity=max(verbosity - 1, 0),
+ interactive=False,
+ database=self.connection.alias)
+
+ from django.core.cache import get_cache
+ from django.core.cache.backends.db import BaseDatabaseCache
+ for cache_alias in settings.CACHES:
+ cache = get_cache(cache_alias)
+ if isinstance(cache, BaseDatabaseCache):
+ call_command('createcachetable', cache._table,
+ database=self.connection.alias)
+
+ # Get a cursor (even though we don't need one yet). This has
+ # the side effect of initializing the test database.
+ self.connection.cursor()
+
+ return test_database_name
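+
+ # Usage sketch (illustrative, not part of the original source): driving
+ # the test-database lifecycle by hand; the Django test runner normally
+ # performs these calls itself.
+ #
+ #     from django.db import connection
+ #     old_name = connection.settings_dict['NAME']
+ #     connection.creation.create_test_db(verbosity=0, autoclobber=True)
+ #     try:
+ #         pass  # run the test suite here
+ #     finally:
+ #         connection.creation.destroy_test_db(old_name, verbosity=0)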
+
+ def _get_test_db_name(self):
+ """
+ Internal implementation - returns the name of the test DB that will be
+ created. Only useful when called from create_test_db() and
+ _create_test_db() and when no external munging is done with the 'NAME'
+ or 'TEST_NAME' settings.
+ """
+ if self.connection.settings_dict['TEST_NAME']:
+ return self.connection.settings_dict['TEST_NAME']
+ return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
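+
+ # Example (illustrative): with NAME = 'myapp' and no TEST_NAME setting,
+ # this returns TEST_DATABASE_PREFIX + 'myapp', i.e. 'test_myapp'; with
+ # TEST_NAME = 'myapp_testing' it returns 'myapp_testing' unchanged.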
+
+ def _create_test_db(self, verbosity, autoclobber):
+ """
+ Internal implementation - creates the test db tables.
+ """
+ suffix = self.sql_table_creation_suffix()
+
+ test_database_name = self._get_test_db_name()
+
+ qn = self.connection.ops.quote_name
+
+ # Create the test database and connect to it.
+ cursor = self.connection.cursor()
+ try:
+ cursor.execute(
+ "CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
+ except Exception as e:
+ sys.stderr.write(
+ "Got an error creating the test database: %s\n" % e)
+ if not autoclobber:
+ confirm = input(
+ "Type 'yes' if you would like to try deleting the test "
+ "database '%s', or 'no' to cancel: " % test_database_name)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..."
+ % self.connection.alias)
+ cursor.execute(
+ "DROP DATABASE %s" % qn(test_database_name))
+ cursor.execute(
+ "CREATE DATABASE %s %s" % (qn(test_database_name),
+ suffix))
+ except Exception as e:
+ sys.stderr.write(
+ "Got an error recreating the test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ return test_database_name
+
+ def destroy_test_db(self, old_database_name, verbosity=1):
+ """
+ Destroy a test database, prompting the user for confirmation if the
+ database already exists.
+ """
+ self.connection.close()
+ test_database_name = self.connection.settings_dict['NAME']
+ if verbosity >= 1:
+ test_db_repr = ''
+ if verbosity >= 2:
+ test_db_repr = " ('%s')" % test_database_name
+ print("Destroying test database for alias '%s'%s..." % (
+ self.connection.alias, test_db_repr))
+
+ # Temporarily use a new connection and a copy of the settings dict.
+ # This prevents the production database from being exposed to potential
+ # child threads while (or after) the test database is destroyed.
+ # Refs #10868 and #17786.
+ settings_dict = self.connection.settings_dict.copy()
+ settings_dict['NAME'] = old_database_name
+ backend = load_backend(settings_dict['ENGINE'])
+ new_connection = backend.DatabaseWrapper(
+ settings_dict,
+ alias='__destroy_test_db__',
+ allow_thread_sharing=False)
+ new_connection.creation._destroy_test_db(test_database_name, verbosity)
+
+ def _destroy_test_db(self, test_database_name, verbosity):
+ """
+ Internal implementation - remove the test db tables.
+ """
+ # Remove the test database to clean up after
+ # ourselves. Connect to the previous database (not the test database)
+ # to do so, because it's not allowed to delete a database while being
+ # connected to it.
+ cursor = self.connection.cursor()
+ # Wait to avoid "database is being accessed by other users" errors.
+ time.sleep(1)
+ cursor.execute("DROP DATABASE %s"
+ % self.connection.ops.quote_name(test_database_name))
+ self.connection.close()
+
+ def set_autocommit(self):
+ """
+ Makes sure a connection is in autocommit mode. Deprecated and no longer
+ used by Django code itself; kept for compatibility with user code that
+ might still call it.
+ """
+ warnings.warn(
+ "set_autocommit was moved from BaseDatabaseCreation to "
+ "BaseDatabaseWrapper.", PendingDeprecationWarning, stacklevel=2)
+ return self.connection.set_autocommit(True)
+
+ def sql_table_creation_suffix(self):
+ """
+ SQL to append to the end of the test table creation statements.
+ """
+ return ''
+
+ def test_db_signature(self):
+ """
+ Returns a tuple of elements from self.connection.settings_dict (a
+ DATABASES setting value) that uniquely identify a database,
+ according to the particularities of the RDBMS.
+ """
+ settings_dict = self.connection.settings_dict
+ return (
+ settings_dict['HOST'],
+ settings_dict['PORT'],
+ settings_dict['ENGINE'],
+ settings_dict['NAME']
+ )
diff --git a/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py b/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/dummy/base.py b/lib/python2.7/site-packages/django/db/backends/dummy/base.py
new file mode 100644
index 0000000..9a220ff
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/dummy/base.py
@@ -0,0 +1,73 @@
+"""
+Dummy database backend for Django.
+
+Django uses this if the database ENGINE setting is empty (None or empty string).
+
+Each of these API functions, except connection.close(), raises
+ImproperlyConfigured.
+"""
+
+from django.core.exceptions import ImproperlyConfigured
+from django.db.backends import *
+from django.db.backends.creation import BaseDatabaseCreation
+
+def complain(*args, **kwargs):
+ raise ImproperlyConfigured("settings.DATABASES is improperly configured. "
+ "Please supply the ENGINE value. Check "
+ "settings documentation for more details.")
+
+def ignore(*args, **kwargs):
+ pass
+
+class DatabaseError(Exception):
+ pass
+
+class IntegrityError(DatabaseError):
+ pass
+
+class DatabaseOperations(BaseDatabaseOperations):
+ quote_name = complain
+
+class DatabaseClient(BaseDatabaseClient):
+ runshell = complain
+
+class DatabaseCreation(BaseDatabaseCreation):
+ create_test_db = ignore
+ destroy_test_db = ignore
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ get_table_list = complain
+ get_table_description = complain
+ get_relations = complain
+ get_indexes = complain
+ get_key_columns = complain
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ operators = {}
+ # Override the base class implementations with null
+ # implementations. Anything that tries to actually
+ # do something is mapped to complain (which raises
+ # ImproperlyConfigured); anything that merely rolls
+ # back, undoes or closes something is mapped to
+ # ignore (a no-op).
+ _cursor = complain
+ _commit = complain
+ _rollback = ignore
+ _close = ignore
+ _savepoint = ignore
+ _savepoint_commit = complain
+ _savepoint_rollback = ignore
+ _set_autocommit = complain
+ set_dirty = complain
+ set_clean = complain
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = BaseDatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def is_usable(self):
+ return True
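+
+# Usage sketch (illustrative, not part of the original source): with an
+# empty ENGINE setting this backend is selected, so closing a connection
+# stays harmless while any real work fails loudly.
+#
+#     from django.db import connections
+#     conn = connections['default']  # assumes ENGINE is left empty
+#     conn.close()                   # fine: _close is a no-op (ignore)
+#     conn.cursor()                  # raises ImproperlyConfigured (complain)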
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py b/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/base.py b/lib/python2.7/site-packages/django/db/backends/mysql/base.py
new file mode 100644
index 0000000..ea04a5e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/base.py
@@ -0,0 +1,533 @@
+"""
+MySQL database backend for Django.
+
+Requires MySQLdb: http://sourceforge.net/projects/mysql-python
+"""
+from __future__ import unicode_literals
+
+import datetime
+import re
+import sys
+import warnings
+
+try:
+ import MySQLdb as Database
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
+
+from django.utils.functional import cached_property
+
+# We want version (1, 2, 1, 'final', 2) or later. We can't just use
+# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
+# inadvertently passes the version test.
+version = Database.version_info
+if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
+ (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
+
+from MySQLdb.converters import conversions, Thing2Literal
+from MySQLdb.constants import FIELD_TYPE, CLIENT
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+from django.conf import settings
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.mysql.client import DatabaseClient
+from django.db.backends.mysql.creation import DatabaseCreation
+from django.db.backends.mysql.introspection import DatabaseIntrospection
+from django.db.backends.mysql.validation import DatabaseValidation
+from django.utils.encoding import force_str, force_text
+from django.utils.safestring import SafeBytes, SafeText
+from django.utils import six
+from django.utils import timezone
+
+# Raise exceptions for database warnings if DEBUG is on
+if settings.DEBUG:
+ warnings.filterwarnings("error", category=Database.Warning)
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+# It's impossible to import datetime_or_None directly from MySQLdb.times
+parse_datetime = conversions[FIELD_TYPE.DATETIME]
+
+def parse_datetime_with_timezone_support(value):
+ dt = parse_datetime(value)
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+
+def adapt_datetime_with_timezone_support(value, conv):
+ # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
+ if settings.USE_TZ:
+ if timezone.is_naive(value):
+ warnings.warn("MySQL received a naive datetime (%s)"
+ " while time zone support is active." % value,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
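+
+# Example (illustrative): with USE_TZ = True, an aware value such as
+# 2013-05-01 12:00:00+02:00 is converted to UTC and passed to MySQL as the
+# naive string '2013-05-01 10:00:00'; a naive input first triggers the
+# RuntimeWarning above and is interpreted in the default time zone.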
+
+# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
+# timedelta in terms of actual behavior as they are signed and include days --
+# and Django expects time, so we still need to override that. We also need to
+# add special handling for SafeText and SafeBytes as MySQLdb's type
+# checking is too tight to catch those (see Django ticket #6052).
+# Finally, MySQLdb always returns naive datetime objects. However, when
+# timezone support is active, Django expects timezone-aware datetime objects.
+django_conversions = conversions.copy()
+django_conversions.update({
+ FIELD_TYPE.TIME: util.typecast_time,
+ FIELD_TYPE.DECIMAL: util.typecast_decimal,
+ FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
+ FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
+ datetime.datetime: adapt_datetime_with_timezone_support,
+})
+
+# This should match the numerical portion of the version numbers (we can treat
+# versions like 5.0.24 and 5.0.24a as the same). Based on the lists of versions
+# at http://dev.mysql.com/doc/refman/4.1/en/news.html and
+# http://dev.mysql.com/doc/refman/5.0/en/news.html.
+server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
+
+# MySQLdb-1.2.1 and newer automatically make use of SHOW WARNINGS on
+# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
+# point is to raise Warnings as exceptions, this can be done with the Python
+# warnings module; it is set up when the connection is created, and the
+# standard util.CursorDebugWrapper can be used. Also, using sql_mode
+# TRADITIONAL will automatically cause most warnings to be treated as errors.
+
+class CursorWrapper(object):
+ """
+ A thin wrapper around MySQLdb's normal cursor class so that we can catch
+ particular exception instances and reraise them with the right types.
+
+ Implemented as a wrapper, rather than a subclass, so that we aren't stuck
+ to the particular underlying representation returned by Connection.cursor().
+ """
+ codes_for_integrityerror = (1048,)
+
+ def __init__(self, cursor):
+ self.cursor = cursor
+
+ def execute(self, query, args=None):
+ try:
+ # args is None means no string interpolation
+ return self.cursor.execute(query, args)
+ except Database.OperationalError as e:
+ # Map some error codes to IntegrityError: MySQL raises them as
+ # OperationalError even though they are integrity violations.
+ if e.args[0] in self.codes_for_integrityerror:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def executemany(self, query, args):
+ try:
+ return self.cursor.executemany(query, args)
+ except Database.OperationalError as e:
+ # Map some error codes to IntegrityError: MySQL raises them as
+ # OperationalError even though they are integrity violations.
+ if e.args[0] in self.codes_for_integrityerror:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def __getattr__(self, attr):
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ else:
+ return getattr(self.cursor, attr)
+
+ def __iter__(self):
+ return iter(self.cursor)
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ empty_fetchmany_value = ()
+ update_can_self_select = False
+ allows_group_by_pk = True
+ related_fields_match_type = True
+ allow_sliced_subqueries = False
+ has_bulk_insert = True
+ has_select_for_update = True
+ has_select_for_update_nowait = False
+ supports_forward_references = False
+ supports_long_model_names = False
+ supports_microsecond_precision = False
+ supports_regex_backreferencing = False
+ supports_date_lookup_using_string = False
+ supports_timezones = False
+ requires_explicit_null_ordering_when_grouping = True
+ allows_primary_key_0 = False
+ uses_savepoints = True
+ atomic_transactions = False
+
+ def __init__(self, connection):
+ super(DatabaseFeatures, self).__init__(connection)
+
+ @cached_property
+ def _mysql_storage_engine(self):
+ "Internal method used in Django tests. Don't rely on this from your code"
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
+ # This command is MySQL specific; the second column
+ # will tell you the default table type of the created
+ # table. Since all Django's test tables will have the same
+ # table type, that's enough to evaluate the feature.
+ cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
+ result = cursor.fetchone()
+ cursor.execute('DROP TABLE INTROSPECT_TEST')
+ return result[1]
+
+ @cached_property
+ def can_introspect_foreign_keys(self):
+ "Confirm support for introspected foreign keys"
+ return self._mysql_storage_engine != 'MyISAM'
+
+ @cached_property
+ def has_zoneinfo_database(self):
+ # MySQL accepts full time zone names (e.g. Africa/Nairobi) but rejects
+ # abbreviations (e.g. EAT). When pytz isn't installed and the current
+ # time zone is LocalTimezone (the only sensible value in this
+ # context), the current time zone name will be an abbreviation. As a
+ # consequence, MySQL cannot perform time zone conversions reliably.
+ if pytz is None:
+ return False
+
+ # Test if the time zone definitions are installed.
+ cursor = self.connection.cursor()
+ cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
+ return cursor.fetchone() is not None
+
+class DatabaseOperations(BaseDatabaseOperations):
+ compiler_module = "django.db.backends.mysql.compiler"
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
+ if lookup_type == 'week_day':
+ # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
+ # Note: WEEKDAY() returns 0-6, Monday=0.
+ return "DAYOFWEEK(%s)" % field_name
+ else:
+ return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
+ format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
+ format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
+ try:
+ i = fields.index(lookup_type) + 1
+ except ValueError:
+ sql = field_name
+ else:
+ format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
+ sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
+ return sql
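+
+ # Example (illustrative): date_trunc_sql('month', 'created') builds
+ # (after the doubled percents collapse during parameter interpolation)
+ # the SQL
+ #
+ #     CAST(DATE_FORMAT(created, '%Y-%m-01 00:00:00') AS DATETIME)
+ #
+ # while an unknown lookup_type falls through to the bare column name.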
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
+ if lookup_type == 'week_day':
+ # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
+ # Note: WEEKDAY() returns 0-6, Monday=0.
+ sql = "DAYOFWEEK(%s)" % field_name
+ else:
+ sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+ return sql, params
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
+ params = [tzname]
+ else:
+ params = []
+ fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
+ format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
+ format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
+ try:
+ i = fields.index(lookup_type) + 1
+ except ValueError:
+ sql = field_name
+ else:
+ format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
+ sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
+ return sql, params
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
+ timedelta.days, timedelta.seconds, timedelta.microseconds)
+
+ def drop_foreignkey_sql(self):
+ return "DROP FOREIGN KEY"
+
+ def force_no_ordering(self):
+ """
+ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
+ columns. If no ordering would otherwise be applied, we don't want any
+ implicit sorting going on.
+ """
+ return ["NULL"]
+
+ def fulltext_search_sql(self, field_name):
+ return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
+
+ def last_executed_query(self, cursor, sql, params):
+ # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
+ # attribute where the exact query sent to the database is saved.
+ # See MySQLdb/cursors.py in the source distribution.
+ return force_text(getattr(cursor, '_last_executed', None), errors='replace')
+
+ def no_limit_value(self):
+ # 2**64 - 1, as recommended by the MySQL documentation
+ return 18446744073709551615
+
+ def quote_name(self, name):
+ if name.startswith("`") and name.endswith("`"):
+ return name # Quoting once is enough.
+ return "`%s`" % name
+
+ def random_function_sql(self):
+ return 'RAND()'
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # NB: The generated SQL below is specific to MySQL
+ # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
+ # to clear all tables of all data
+ if tables:
+ sql = ['SET FOREIGN_KEY_CHECKS = 0;']
+ for table in tables:
+ sql.append('%s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ style.SQL_FIELD(self.quote_name(table)),
+ ))
+ sql.append('SET FOREIGN_KEY_CHECKS = 1;')
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
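+
+ # Example (illustrative): for tables ['app_a', 'app_b'] and no sequences,
+ # this returns (ignoring terminal colorization by `style`):
+ #
+ #     SET FOREIGN_KEY_CHECKS = 0;
+ #     TRUNCATE `app_a`;
+ #     TRUNCATE `app_b`;
+ #     SET FOREIGN_KEY_CHECKS = 1;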
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ # Truncate already resets the AUTO_INCREMENT field from
+ # MySQL version 5.0.13 onwards. Refs #16961.
+ if self.connection.mysql_version < (5, 0, 13):
+ return ["%s %s %s %s %s;" % \
+ (style.SQL_KEYWORD('ALTER'),
+ style.SQL_KEYWORD('TABLE'),
+ style.SQL_TABLE(self.quote_name(sequence['table'])),
+ style.SQL_KEYWORD('AUTO_INCREMENT'),
+ style.SQL_FIELD('= 1'),
+ ) for sequence in sequences]
+ else:
+ return []
+
+ def validate_autopk_value(self, value):
+ # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
+ if value == 0:
+ raise ValueError('The database backend does not accept 0 as a '
+ 'value for AutoField.')
+ return value
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # MySQL doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ # MySQL doesn't support microseconds
+ return six.text_type(value.replace(microsecond=0))
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ # MySQL doesn't support tz-aware times
+ if timezone.is_aware(value):
+ raise ValueError("MySQL backend does not support timezone-aware times.")
+
+ # MySQL doesn't support microseconds
+ return six.text_type(value.replace(microsecond=0))
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ # Again, no microseconds
+ first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
+ return [first.replace(microsecond=0), second.replace(microsecond=0)]
+
+ def max_name_length(self):
+ return 64
+
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
+ return "VALUES " + ", ".join([items_sql] * num_values)
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'mysql'
+ operators = {
+ 'exact': '= %s',
+ 'iexact': 'LIKE %s',
+ 'contains': 'LIKE BINARY %s',
+ 'icontains': 'LIKE %s',
+ 'regex': 'REGEXP BINARY %s',
+ 'iregex': 'REGEXP %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': 'LIKE BINARY %s',
+ 'endswith': 'LIKE BINARY %s',
+ 'istartswith': 'LIKE %s',
+ 'iendswith': 'LIKE %s',
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = DatabaseValidation(self)
+
+ def get_connection_params(self):
+ kwargs = {
+ 'conv': django_conversions,
+ 'charset': 'utf8',
+ }
+ if six.PY2:
+ kwargs['use_unicode'] = True
+ settings_dict = self.settings_dict
+ if settings_dict['USER']:
+ kwargs['user'] = settings_dict['USER']
+ if settings_dict['NAME']:
+ kwargs['db'] = settings_dict['NAME']
+ if settings_dict['PASSWORD']:
+ kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
+ if settings_dict['HOST'].startswith('/'):
+ kwargs['unix_socket'] = settings_dict['HOST']
+ elif settings_dict['HOST']:
+ kwargs['host'] = settings_dict['HOST']
+ if settings_dict['PORT']:
+ kwargs['port'] = int(settings_dict['PORT'])
+ # We need the number of potentially affected rows after an
+ # "UPDATE", not the number of changed rows.
+ kwargs['client_flag'] = CLIENT.FOUND_ROWS
+ kwargs.update(settings_dict['OPTIONS'])
+ return kwargs
+
+ def get_new_connection(self, conn_params):
+ conn = Database.connect(**conn_params)
+ conn.encoders[SafeText] = conn.encoders[six.text_type]
+ conn.encoders[SafeBytes] = conn.encoders[bytes]
+ return conn
+
+ def init_connection_state(self):
+ cursor = self.connection.cursor()
+ # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
+ # on a recently-inserted row will return when the field is tested for
+ # NULL. Disabling this value brings this aspect of MySQL in line with
+ # SQL standards.
+ cursor.execute('SET SQL_AUTO_IS_NULL = 0')
+ cursor.close()
+
+ def create_cursor(self):
+ cursor = self.connection.cursor()
+ return CursorWrapper(cursor)
+
+ def _rollback(self):
+ try:
+ BaseDatabaseWrapper._rollback(self)
+ except Database.NotSupportedError:
+ pass
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ self.connection.autocommit(autocommit)
+
+ def disable_constraint_checking(self):
+ """
+ Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
+ to indicate constraint checks need to be re-enabled.
+ """
+ self.cursor().execute('SET foreign_key_checks=0')
+ return True
+
+ def enable_constraint_checking(self):
+ """
+ Re-enable foreign key checks after they have been disabled.
+ """
+ # Override needs_rollback in case constraint_checks_disabled is
+ # nested inside transaction.atomic.
+ self.needs_rollback, needs_rollback = False, self.needs_rollback
+ try:
+ self.cursor().execute('SET foreign_key_checks=1')
+ finally:
+ self.needs_rollback = needs_rollback
+
+ def check_constraints(self, table_names=None):
+ """
+ Checks each table name in `table_names` for rows with invalid foreign key references. This method is
+ intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
+ determine if rows with invalid references were entered while constraint checks were off.
+
+ Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
+ detailed information about the invalid reference in the error message.
+
+ Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
+ ALL IMMEDIATE").
+ """
+ cursor = self.cursor()
+ if table_names is None:
+ table_names = self.introspection.table_names(cursor)
+ for table_name in table_names:
+ primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
+ if not primary_key_column_name:
+ continue
+ key_columns = self.introspection.get_key_columns(cursor, table_name)
+ for column_name, referenced_table_name, referenced_column_name in key_columns:
+ cursor.execute("""
+ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
+ LEFT JOIN `%s` as REFERRED
+ ON (REFERRING.`%s` = REFERRED.`%s`)
+ WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
+ % (primary_key_column_name, column_name, table_name, referenced_table_name,
+ column_name, referenced_column_name, column_name, referenced_column_name))
+ for bad_row in cursor.fetchall():
+ raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
+ "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
+ % (table_name, bad_row[0],
+ table_name, column_name, bad_row[1],
+ referenced_table_name, referenced_column_name))
+
+ def is_usable(self):
+ try:
+ self.connection.ping()
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def mysql_version(self):
+ with self.temporary_connection():
+ server_info = self.connection.get_server_info()
+ match = server_version_re.match(server_info)
+ if not match:
+ raise Exception('Unable to determine MySQL version from version string %r' % server_info)
+ return tuple([int(x) for x in match.groups()])
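+
+ # Example (illustrative): a server_info string such as
+ # '5.5.32-0ubuntu0.12.04.1' matches server_version_re and yields the
+ # tuple (5, 5, 32); the non-numeric suffix is ignored.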
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/client.py b/lib/python2.7/site-packages/django/db/backends/mysql/client.py
new file mode 100644
index 0000000..1cf8cee
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/client.py
@@ -0,0 +1,40 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'mysql'
+
+ def runshell(self):
+ settings_dict = self.connection.settings_dict
+ args = [self.executable_name]
+ db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
+ user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
+ passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
+ host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
+ port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
+ defaults_file = settings_dict['OPTIONS'].get('read_default_file')
+ # There seems to be no good way to set sql_mode from the CLI.
+
+ if defaults_file:
+ args += ["--defaults-file=%s" % defaults_file]
+ if user:
+ args += ["--user=%s" % user]
+ if passwd:
+ args += ["--password=%s" % passwd]
+ if host:
+ if '/' in host:
+ args += ["--socket=%s" % host]
+ else:
+ args += ["--host=%s" % host]
+ if port:
+ args += ["--port=%s" % port]
+ if db:
+ args += [db]
+
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py b/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py
new file mode 100644
index 0000000..d3439bf
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py
@@ -0,0 +1,37 @@
+from django.db.models.sql import compiler
+from django.utils.six.moves import zip_longest
+
+
+class SQLCompiler(compiler.SQLCompiler):
+ def resolve_columns(self, row, fields=()):
+ values = []
+ index_extra_select = len(self.query.extra_select)
+ for value, field in zip_longest(row[index_extra_select:], fields):
+ if (field and field.get_internal_type() in ("BooleanField", "NullBooleanField") and
+ value in (0, 1)):
+ value = bool(value)
+ values.append(value)
+ return row[:index_extra_select] + tuple(values)
+
+ def as_subquery_condition(self, alias, columns, qn):
+ qn2 = self.connection.ops.quote_name
+ sql, params = self.as_sql()
+ return '(%s) IN (%s)' % (', '.join(['%s.%s' % (qn(alias), qn2(column)) for column in columns]), sql), params
+
+class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
+ pass
+
+class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
+ pass
+
+class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
+ pass
+
+class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
+ pass
+
+class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
+ pass
+
+class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):
+ pass
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/creation.py b/lib/python2.7/site-packages/django/db/backends/mysql/creation.py
new file mode 100644
index 0000000..3a57c29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/creation.py
@@ -0,0 +1,70 @@
+from django.db.backends.creation import BaseDatabaseCreation
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated MySQL column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ data_types = {
+ 'AutoField': 'integer AUTO_INCREMENT',
+ 'BinaryField': 'longblob',
+ 'BooleanField': 'bool',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'datetime',
+ 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'double precision',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'char(15)',
+ 'GenericIPAddressField': 'char(39)',
+ 'NullBooleanField': 'bool',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer UNSIGNED',
+ 'PositiveSmallIntegerField': 'smallint UNSIGNED',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'longtext',
+ 'TimeField': 'time',
+ }
+
+ def sql_table_creation_suffix(self):
+ suffix = []
+ if self.connection.settings_dict['TEST_CHARSET']:
+ suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
+ if self.connection.settings_dict['TEST_COLLATION']:
+ suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
+ return ' '.join(suffix)
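+
+ # Example (illustrative): with TEST_CHARSET = 'utf8' and TEST_COLLATION =
+ # 'utf8_general_ci', the test CREATE DATABASE statement gets the suffix
+ #
+ #     CHARACTER SET utf8 COLLATE utf8_general_ci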
+
+ def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
+ "All inline references are pending under MySQL"
+ return [], True
+
+ def sql_destroy_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ from ..util import truncate_name
+
+ return [
+ style.SQL_KEYWORD("DROP INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ style.SQL_KEYWORD("ON") + " " +
+ style.SQL_TABLE(qn(model._meta.db_table)) + ";",
+ ]
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py b/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py
new file mode 100644
index 0000000..548877e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py
@@ -0,0 +1,119 @@
+import re
+from .base import FIELD_TYPE
+
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+
+
+foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ data_types_reverse = {
+ FIELD_TYPE.BLOB: 'TextField',
+ FIELD_TYPE.CHAR: 'CharField',
+ FIELD_TYPE.DECIMAL: 'DecimalField',
+ FIELD_TYPE.NEWDECIMAL: 'DecimalField',
+ FIELD_TYPE.DATE: 'DateField',
+ FIELD_TYPE.DATETIME: 'DateTimeField',
+ FIELD_TYPE.DOUBLE: 'FloatField',
+ FIELD_TYPE.FLOAT: 'FloatField',
+ FIELD_TYPE.INT24: 'IntegerField',
+ FIELD_TYPE.LONG: 'IntegerField',
+ FIELD_TYPE.LONGLONG: 'BigIntegerField',
+ FIELD_TYPE.SHORT: 'IntegerField',
+ FIELD_TYPE.STRING: 'CharField',
+ FIELD_TYPE.TIME: 'TimeField',
+ FIELD_TYPE.TIMESTAMP: 'DateTimeField',
+ FIELD_TYPE.TINY: 'IntegerField',
+ FIELD_TYPE.TINY_BLOB: 'TextField',
+ FIELD_TYPE.MEDIUM_BLOB: 'TextField',
+ FIELD_TYPE.LONG_BLOB: 'TextField',
+ FIELD_TYPE.VAR_STRING: 'CharField',
+ }
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("SHOW TABLES")
+ return [row[0] for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ """
+ Returns a description of the table, with the DB-API cursor.description interface.
+ """
+ # The varchar length returned by cursor.description is an internal
+ # length, not the visible length (#5725); use the information_schema
+ # database to fix this.
+ cursor.execute("""
+ SELECT column_name, character_maximum_length FROM information_schema.columns
+ WHERE table_name = %s AND table_schema = DATABASE()
+ AND character_maximum_length IS NOT NULL""", [table_name])
+ length_map = dict(cursor.fetchall())
+
+ # Also getting precision and scale from information_schema (see #5014)
+ cursor.execute("""
+ SELECT column_name, numeric_precision, numeric_scale FROM information_schema.columns
+ WHERE table_name = %s AND table_schema = DATABASE()
+ AND data_type='decimal'""", [table_name])
+ numeric_map = dict([(line[0], tuple([int(n) for n in line[1:]])) for line in cursor.fetchall()])
+
+ cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
+ return [FieldInfo(*((force_text(line[0]),)
+ + line[1:3]
+ + (length_map.get(line[0], line[3]),)
+ + numeric_map.get(line[0], line[4:6])
+ + (line[6],)))
+ for line in cursor.description]
+
+ def _name_to_index(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_name: field_index} for the given table.
+ Indexes are 0-based.
+ """
+ return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ my_field_dict = self._name_to_index(cursor, table_name)
+ constraints = self.get_key_columns(cursor, table_name)
+ relations = {}
+ for my_fieldname, other_table, other_field in constraints:
+ other_field_index = self._name_to_index(cursor, other_table)[other_field]
+ my_field_index = my_field_dict[my_fieldname]
+ relations[my_field_index] = (other_field_index, other_table)
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
+ key columns in given table.
+ """
+ key_columns = []
+ cursor.execute("""
+ SELECT column_name, referenced_table_name, referenced_column_name
+ FROM information_schema.key_column_usage
+ WHERE table_name = %s
+ AND table_schema = DATABASE()
+ AND referenced_table_name IS NOT NULL
+ AND referenced_column_name IS NOT NULL""", [table_name])
+ key_columns.extend(cursor.fetchall())
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
+ # Do a two-pass search for indexes: on first pass check which indexes
+ # are multicolumn, on second pass check which single-column indexes
+ # are present.
+ rows = list(cursor.fetchall())
+ multicol_indexes = set()
+ for row in rows:
+ if row[3] > 1:
+ multicol_indexes.add(row[2])
+ indexes = {}
+ for row in rows:
+ if row[2] in multicol_indexes:
+ continue
+ indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
+ return indexes
+
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/validation.py b/lib/python2.7/site-packages/django/db/backends/mysql/validation.py
new file mode 100644
index 0000000..2ce957c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/validation.py
@@ -0,0 +1,16 @@
+from django.db.backends import BaseDatabaseValidation
+
+class DatabaseValidation(BaseDatabaseValidation):
+ def validate_field(self, errors, opts, f):
+ """
+ MySQL has the following field length restriction:
+ No character (varchar) fields can have a length exceeding 255
+ characters if they have a unique index on them.
+ """
+ from django.db import models
+ varchar_fields = (models.CharField, models.CommaSeparatedIntegerField,
+ models.SlugField)
+ if (isinstance(f, varchar_fields) and f.unique
+ and (f.max_length is None or int(f.max_length) > 255)):
+ msg = '"%(name)s": %(cls)s cannot have a "max_length" greater than 255 when using "unique=True".'
+ errors.add(opts, msg % {'name': f.name, 'cls': f.__class__.__name__})
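+
+# Example (illustrative, not part of the original source): a model declaring
+#
+#     nickname = models.CharField(max_length=300, unique=True)
+#
+# is rejected by this check on MySQL, since unique indexes on varchar
+# columns are limited to 255 characters here.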
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py b/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/base.py b/lib/python2.7/site-packages/django/db/backends/oracle/base.py
new file mode 100644
index 0000000..11ab574
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/base.py
@@ -0,0 +1,961 @@
+"""
+Oracle database backend for Django.
+
+Requires cx_Oracle: http://cx-oracle.sourceforge.net/
+"""
+from __future__ import unicode_literals
+
+import decimal
+import re
+import sys
+import warnings
+
+def _setup_environment(environ):
+ import platform
+ # Cygwin requires some special voodoo to set the environment variables
+ # properly so that Oracle will see them.
+ if platform.system().upper().startswith('CYGWIN'):
+ try:
+ import ctypes
+ except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading ctypes: %s; "
+ "the Oracle backend requires ctypes to "
+ "operate correctly under Cygwin." % e)
+ kernel32 = ctypes.CDLL('kernel32')
+ for name, value in environ:
+ kernel32.SetEnvironmentVariableA(name, value)
+ else:
+ import os
+ os.environ.update(environ)
+
+_setup_environment([
+ # Oracle takes client-side character set encoding from the environment.
+ ('NLS_LANG', '.UTF8'),
+ # This prevents Unicode from being mangled when it is encoded into the
+ # potentially non-Unicode database character set.
+ ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
+])
+
+
+try:
+ import cx_Oracle as Database
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.oracle.client import DatabaseClient
+from django.db.backends.oracle.creation import DatabaseCreation
+from django.db.backends.oracle.introspection import DatabaseIntrospection
+from django.utils.encoding import force_bytes, force_text
+
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
+# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
+if int(Database.version.split('.', 1)[0]) >= 5 and \
+ (int(Database.version.split('.', 2)[1]) >= 1 or
+ not hasattr(Database, 'UNICODE')):
+ convert_unicode = force_text
+else:
+ convert_unicode = force_bytes
+
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ empty_fetchmany_value = ()
+ needs_datetime_string_cast = False
+ interprets_empty_strings_as_nulls = True
+ uses_savepoints = True
+ has_select_for_update = True
+ has_select_for_update_nowait = True
+ can_return_id_from_insert = True
+ allow_sliced_subqueries = False
+ supports_subqueries_in_group_by = False
+ supports_transactions = True
+ supports_timezones = False
+ has_zoneinfo_database = pytz is not None
+ supports_bitwise_or = False
+ can_defer_constraint_checks = True
+ ignores_nulls_in_unique_constraints = False
+ has_bulk_insert = True
+ supports_tablespaces = True
+ supports_sequence_reset = False
+ atomic_transactions = False
+
+class DatabaseOperations(BaseDatabaseOperations):
+ compiler_module = "django.db.backends.oracle.compiler"
+
+ def autoinc_sql(self, table, column):
+ # To simulate auto-incrementing primary keys in Oracle, we have to
+ # create a sequence and a trigger.
+ sq_name = self._get_sequence_name(table)
+ tr_name = self._get_trigger_name(table)
+ tbl_name = self.quote_name(table)
+ col_name = self.quote_name(column)
+ sequence_sql = """
+DECLARE
+ i INTEGER;
+BEGIN
+ SELECT COUNT(*) INTO i FROM USER_CATALOG
+ WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
+ IF i = 0 THEN
+ EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
+ END IF;
+END;
+/""" % locals()
+ trigger_sql = """
+CREATE OR REPLACE TRIGGER "%(tr_name)s"
+BEFORE INSERT ON %(tbl_name)s
+FOR EACH ROW
+WHEN (new.%(col_name)s IS NULL)
+ BEGIN
+ SELECT "%(sq_name)s".nextval
+ INTO :new.%(col_name)s FROM dual;
+ END;
+/""" % locals()
+ return sequence_sql, trigger_sql
+
+ def cache_key_culling_sql(self):
+ return """
+ SELECT cache_key
+ FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
+ WHERE rank = %%s + 1
+ """
+
+ def date_extract_sql(self, lookup_type, field_name):
+ if lookup_type == 'week_day':
+ # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
+ return "TO_CHAR(%s, 'D')" % field_name
+ else:
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
+ return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements interval arithmetic for expressions. The format
+ for Oracle is:
+ (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
+ """
+ minutes, seconds = divmod(timedelta.seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+ days = str(timedelta.days)
+ day_precision = len(days)
+ fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
+ return fmt % (sql, connector, days, hours, minutes, seconds,
+ timedelta.microseconds, day_precision)
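+
+ # Example (illustrative): timedelta(days=3, minutes=3, seconds=20) with
+ # connector '+' renders as
+ #
+ #     (<sql> + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
+ #
+ # day_precision grows with the number of digits in the day count.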
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
+ if lookup_type in ('year', 'month'):
+ return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
+ else:
+ return "TRUNC(%s)" % field_name
+
+ # Oracle crashes with "ORA-03113: end-of-file on communication channel"
+ # if the time zone name is passed as a parameter. Use interpolation instead.
+ # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
+ # This regexp matches all time zone names from the zoneinfo database.
+ _tzname_re = re.compile(r'^[\w/:+-]+$')
+
+ def _convert_field_to_tz(self, field_name, tzname):
+ if not self._tzname_re.match(tzname):
+ raise ValueError("Invalid time zone name: %s" % tzname)
+ # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
+ result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
+ # Extracting from a TIMESTAMP WITH TIME ZONE ignores the time zone.
+ # Convert to a DATETIME, which is called DATE by Oracle. There's no
+ # built-in function to do that; the easiest way is to go through a string.
+ result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
+ result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
+ # Re-convert to a TIMESTAMP because EXTRACT only handles the date part
+ # on DATE values, even though they actually store the time part.
+ return "CAST(%s AS TIMESTAMP)" % result
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = self._convert_field_to_tz(field_name, tzname)
+ if lookup_type == 'week_day':
+ # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
+ sql = "TO_CHAR(%s, 'D')" % field_name
+ else:
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
+ sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+ return sql, []
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = self._convert_field_to_tz(field_name, tzname)
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
+ if lookup_type in ('year', 'month'):
+ sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
+ elif lookup_type == 'day':
+ sql = "TRUNC(%s)" % field_name
+ elif lookup_type == 'hour':
+ sql = "TRUNC(%s, 'HH24')" % field_name
+ elif lookup_type == 'minute':
+ sql = "TRUNC(%s, 'MI')" % field_name
+ else:
+ sql = field_name # Cast to DATE removes sub-second precision.
+ return sql, []
+
+ def convert_values(self, value, field):
+ if isinstance(value, Database.LOB):
+ value = value.read()
+ if field and field.get_internal_type() == 'TextField':
+ value = force_text(value)
+
+ # Oracle stores empty strings as null. We need to undo this in
+ # order to adhere to the Django convention of using the empty
+ # string instead of null, but only if the field accepts the
+ # empty string.
+ if value is None and field and field.empty_strings_allowed:
+ value = ''
+ # Convert 1 or 0 to True or False
+ elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
+ value = bool(value)
+ # Force floats to the correct type
+ elif value is not None and field and field.get_internal_type() == 'FloatField':
+ value = float(value)
+ # Convert floats to decimals
+ elif value is not None and field and field.get_internal_type() == 'DecimalField':
+ value = util.typecast_decimal(field.format_number(value))
+ # cx_Oracle always returns datetime.datetime objects for
+ # DATE and TIMESTAMP columns, but Django wants to see a
+ # python datetime.date, .time, or .datetime. We use the type
+ # of the Field to determine which to cast to, but it's not
+ # always available.
+ # As a workaround, we cast to date if all the time-related
+ # values are 0, or to time if the date is 1/1/1900.
+ # This could be cleaned a bit by adding a method to the Field
+ # classes to normalize values from the database (the to_python
+ # method is used for validation and isn't what we want here).
+ elif isinstance(value, Database.Timestamp):
+ if field and field.get_internal_type() == 'DateTimeField':
+ pass
+ elif field and field.get_internal_type() == 'DateField':
+ value = value.date()
+ elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
+ value = value.time()
+ elif value.hour == value.minute == value.second == value.microsecond == 0:
+ value = value.date()
+ return value
+
+ def deferrable_sql(self):
+ return " DEFERRABLE INITIALLY DEFERRED"
+
+ def drop_sequence_sql(self, table):
+ return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
+
+ def fetch_returned_insert_id(self, cursor):
+ return int(cursor._insert_id_var.getvalue())
+
+ def field_cast_sql(self, db_type, internal_type):
+ if db_type and db_type.endswith('LOB'):
+ return "DBMS_LOB.SUBSTR(%s)"
+ else:
+ return "%s"
+
+ def last_executed_query(self, cursor, sql, params):
+ # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
+ # The DB API definition does not define this attribute.
+ statement = cursor.statement
+ if statement and six.PY2 and not isinstance(statement, unicode):
+ statement = statement.decode('utf-8')
+ # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, cx_Oracle's
+ # `statement` doesn't contain the query parameters. refs #20010.
+ return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
+
+ def last_insert_id(self, cursor, table_name, pk_name):
+ sq_name = self._get_sequence_name(table_name)
+ cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
+ return cursor.fetchone()[0]
+
+ def lookup_cast(self, lookup_type):
+ if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
+ return "UPPER(%s)"
+ return "%s"
+
+ def max_in_list_size(self):
+ return 1000
+
+ def max_name_length(self):
+ return 30
+
+ def prep_for_iexact_query(self, x):
+ return x
+
+ def process_clob(self, value):
+ if value is None:
+ return ''
+ return force_text(value.read())
+
+ def quote_name(self, name):
+ # SQL92 requires delimited (quoted) names to be case-sensitive. When
+ # not quoted, Oracle has case-insensitive behavior for identifiers, but
+ # always defaults to uppercase.
+ # We simplify things by making Oracle identifiers always uppercase.
+ if not name.startswith('"') and not name.endswith('"'):
+ name = '"%s"' % util.truncate_name(name.upper(),
+ self.max_name_length())
+ # Oracle puts the query text into a (query % args) construct, so % signs
+ # in names need to be escaped. The '%%' will be collapsed back to '%' at
+ # that stage so we aren't really making the name longer here.
+ name = name.replace('%','%%')
+ return name.upper()
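+
+ # Example (illustrative): quote_name('django_content_type') returns
+ # '"DJANGO_CONTENT_TYPE"'; an already-quoted name is only uppercased,
+ # and literal % signs are doubled so that the later (query % args)
+ # interpolation restores them.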
+
+ def random_function_sql(self):
+ return "DBMS_RANDOM.RANDOM"
+
+ def regex_lookup_9(self, lookup_type):
+ raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
+
+ def regex_lookup_10(self, lookup_type):
+ if lookup_type == 'regex':
+ match_option = "'c'"
+ else:
+ match_option = "'i'"
+ return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
+
+ def regex_lookup(self, lookup_type):
+ # If regex_lookup is called before it's been initialized, then create
+ # a cursor to initialize it and recur.
+ self.connection.cursor()
+ return self.connection.ops.regex_lookup(lookup_type)
+
+ def return_insert_id(self):
+ return "RETURNING %s INTO %%s", (InsertIdVar(),)
+
+ def savepoint_create_sql(self, sid):
+ return convert_unicode("SAVEPOINT " + self.quote_name(sid))
+
+ def savepoint_rollback_sql(self, sid):
+ return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
+ # 'TRUNCATE z;'... style SQL statements
+ if tables:
+ # Oracle does support TRUNCATE, but it seems to get us into
+ # FK referential trouble, whereas DELETE FROM table works.
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('DELETE'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_FIELD(self.quote_name(table))
+ ) for table in tables]
+ # Since we've just deleted all the rows, running our sequence
+ # ALTER code will reset the sequence to 0.
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ sql = []
+ for sequence_info in sequences:
+ sequence_name = self._get_sequence_name(sequence_info['table'])
+ table_name = self.quote_name(sequence_info['table'])
+ column_name = self.quote_name(sequence_info['column'] or 'id')
+ query = _get_sequence_reset_sql() % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name}
+ sql.append(query)
+ return sql
+
+ def sequence_reset_sql(self, style, model_list):
+ from django.db import models
+ output = []
+ query = _get_sequence_reset_sql()
+ for model in model_list:
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ table_name = self.quote_name(model._meta.db_table)
+ sequence_name = self._get_sequence_name(model._meta.db_table)
+ column_name = self.quote_name(f.column)
+ output.append(query % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name})
+ # Only one AutoField is allowed per model, so don't
+ # continue to loop
+ break
+ for f in model._meta.many_to_many:
+ if not f.rel.through:
+ table_name = self.quote_name(f.m2m_db_table())
+ sequence_name = self._get_sequence_name(f.m2m_db_table())
+ column_name = self.quote_name('id')
+ output.append(query % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name})
+ return output
+
+ def start_transaction_sql(self):
+ return ''
+
+ def tablespace_sql(self, tablespace, inline=False):
+ if inline:
+ return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
+ else:
+ return "TABLESPACE %s" % self.quote_name(tablespace)
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # Oracle doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ if isinstance(value, six.string_types):
+ return datetime.datetime.strptime(value, '%H:%M:%S')
+
+ # Oracle doesn't support tz-aware times
+ if timezone.is_aware(value):
+ raise ValueError("Oracle backend does not support timezone-aware times.")
+
+ return datetime.datetime(1900, 1, 1, value.hour, value.minute,
+ value.second, value.microsecond)
+
+ def year_lookup_bounds_for_date_field(self, value):
+ first = '%s-01-01'
+ second = '%s-12-31'
+ return [first % value, second % value]
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ # The default implementation uses datetime objects for the bounds.
+ # This must be overridden here, to use a formatted date (string) as
+ # 'second' instead -- cx_Oracle chops the fraction-of-second part
+ # off of datetime objects, leaving almost an entire second out of
+ # the year under the default implementation.
+ bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
+ if settings.USE_TZ:
+ bounds = [b.astimezone(timezone.utc).replace(tzinfo=None) for b in bounds]
+ return [b.isoformat(b' ') for b in bounds]
+
+ def combine_expression(self, connector, sub_expressions):
+ "Oracle requires special cases for %% and & operators in query expressions"
+ if connector == '%%':
+ return 'MOD(%s)' % ','.join(sub_expressions)
+ elif connector == '&':
+ return 'BITAND(%s)' % ','.join(sub_expressions)
+ elif connector == '|':
+ raise NotImplementedError("Bit-wise or is not supported in Oracle.")
+ return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
+
+ def _get_sequence_name(self, table):
+ name_length = self.max_name_length() - 3
+ return '%s_SQ' % util.truncate_name(table, name_length).upper()
+
+ def _get_trigger_name(self, table):
+ name_length = self.max_name_length() - 3
+ return '%s_TR' % util.truncate_name(table, name_length).upper()
+
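+ # Illustration: with two fields and num_values=2, bulk_insert_sql
+ # returns "SELECT %s, %s FROM DUAL UNION ALL SELECT %s, %s FROM DUAL".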
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
+ return " UNION ALL ".join([items_sql] * num_values)
+
+
+class _UninitializedOperatorsDescriptor(object):
+
+ def __get__(self, instance, owner):
+ # If connection.operators is looked up before a connection has been
+ # created, transparently initialize connection.operators to avert an
+ # AttributeError.
+ if instance is None:
+ raise AttributeError("operators not available as class attribute")
+ # Creating a cursor will initialize the operators.
+ instance.cursor().close()
+ return instance.__dict__['operators']
+
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'oracle'
+ operators = _UninitializedOperatorsDescriptor()
+
+ _standard_operators = {
+ 'exact': '= %s',
+ 'iexact': '= UPPER(%s)',
+ 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ }
+
+ _likec_operators = _standard_operators.copy()
+ _likec_operators.update({
+ 'contains': "LIKEC %s ESCAPE '\\'",
+ 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
+ 'startswith': "LIKEC %s ESCAPE '\\'",
+ 'endswith': "LIKEC %s ESCAPE '\\'",
+ 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
+ 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
+ })
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
+ self.features.can_return_id_from_insert = use_returning_into
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
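+ # Illustration (hypothetical settings): USER 'scott', PASSWORD 'tiger'
+ # and a DSN built by Database.makedsn() yield a connect string of the
+ # form "scott/tiger@<dsn>".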
+ def _connect_string(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['HOST'].strip():
+ settings_dict['HOST'] = 'localhost'
+ if settings_dict['PORT'].strip():
+ dsn = Database.makedsn(settings_dict['HOST'],
+ int(settings_dict['PORT']),
+ settings_dict['NAME'])
+ else:
+ dsn = settings_dict['NAME']
+ return "%s/%s@%s" % (settings_dict['USER'],
+ settings_dict['PASSWORD'], dsn)
+
+ def get_connection_params(self):
+ conn_params = self.settings_dict['OPTIONS'].copy()
+ if 'use_returning_into' in conn_params:
+ del conn_params['use_returning_into']
+ return conn_params
+
+ def get_new_connection(self, conn_params):
+ conn_string = convert_unicode(self._connect_string())
+ return Database.connect(conn_string, **conn_params)
+
+ def init_connection_state(self):
+ cursor = self.create_cursor()
+ # Set the territory first. The territory overrides NLS_DATE_FORMAT
+ # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
+ # these are set in a single statement, it isn't clear what is
+ # supposed to happen.
+ cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
+ # Set the Oracle date format to the ANSI date format. This only needs
+ # to execute once when we create a new connection. We also set the
+ # territory to 'AMERICA', which forces Sunday to evaluate to a '1' in
+ # TO_CHAR().
+ cursor.execute(
+ "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
+ " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
+ cursor.close()
+ if 'operators' not in self.__dict__:
+ # Ticket #14149: Check whether our LIKE implementation will
+ # work for this connection or we need to fall back on LIKEC.
+ # This check is performed only once per DatabaseWrapper
+ # instance per thread, since subsequent connections will use
+ # the same settings.
+ cursor = self.create_cursor()
+ try:
+ cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
+ % self._standard_operators['contains'],
+ ['X'])
+ except DatabaseError:
+ self.operators = self._likec_operators
+ else:
+ self.operators = self._standard_operators
+ cursor.close()
+
+ # There's no way for the DatabaseOperations class to know the
+ # currently active Oracle version, so we do some setups here.
+ # TODO: Multi-db support will need a better solution (a way to
+ # communicate the current version).
+ if self.oracle_version is not None and self.oracle_version <= 9:
+ self.ops.regex_lookup = self.ops.regex_lookup_9
+ else:
+ self.ops.regex_lookup = self.ops.regex_lookup_10
+
+ try:
+ self.connection.stmtcachesize = 20
+ except AttributeError:
+ # Django docs specify cx_Oracle version 4.3.1 or higher, but
+ # stmtcachesize is available only in 4.3.2 and up.
+ pass
+
+ def create_cursor(self):
+ return FormatStylePlaceholderCursor(self.connection)
+
+ def _commit(self):
+ if self.connection is not None:
+ try:
+ return self.connection.commit()
+ except Database.DatabaseError as e:
+ # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
+ # with the following attributes and values:
+ # code = 2091
+ # message = 'ORA-02091: transaction rolled back
+ # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
+ # _C00102056) violated - parent key not found'
+ # We convert that particular case to our IntegrityError exception
+ x = e.args[0]
+ if hasattr(x, 'code') and hasattr(x, 'message') \
+ and x.code == 2091 and 'ORA-02291' in x.message:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ # Oracle doesn't support savepoint commits. Ignore them.
+ def _savepoint_commit(self, sid):
+ pass
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ self.connection.autocommit = autocommit
+
+ def check_constraints(self, table_names=None):
+ """
+ To check constraints, we set them to immediate. Then, when we're done,
+ we must ensure they are returned to deferred.
+ """
+ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
+ self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
+
+ def is_usable(self):
+ try:
+ if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
+ self.connection.ping()
+ else:
+ # Use a cx_Oracle cursor directly, bypassing Django's utilities.
+ self.connection.cursor().execute("SELECT 1 FROM DUAL")
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def oracle_version(self):
+ with self.temporary_connection():
+ version = self.connection.version
+ try:
+ return int(version.split('.')[0])
+ except ValueError:
+ return None
+
+
+class OracleParam(object):
+ """
+ Wrapper object for formatting parameters for Oracle. If the string
+ representation of the value is large enough (greater than 4000 characters)
+ the input size needs to be set as CLOB. Alternatively, if the parameter
+ has an `input_size` attribute, then the value of the `input_size` attribute
+ will be used instead. Otherwise, no input size will be set for the
+ parameter when executing the query.
+ """
+
+ def __init__(self, param, cursor, strings_only=False):
+ # With raw SQL queries, datetimes can reach this function
+ # without being converted by DateTimeField.get_db_prep_value.
+ if settings.USE_TZ and isinstance(param, datetime.datetime):
+ if timezone.is_naive(param):
+ warnings.warn("Oracle received a naive datetime (%s)"
+ " while time zone support is active." % param,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ param = timezone.make_aware(param, default_timezone)
+ param = param.astimezone(timezone.utc).replace(tzinfo=None)
+
+ # Oracle doesn't recognize True and False correctly in Python 3.
+ # The conversion done below works in both Python 2 and 3.
+ if param is True:
+ param = "1"
+ elif param is False:
+ param = "0"
+ if hasattr(param, 'bind_parameter'):
+ self.force_bytes = param.bind_parameter(cursor)
+ elif isinstance(param, six.memoryview):
+ self.force_bytes = param
+ else:
+ self.force_bytes = convert_unicode(param, cursor.charset,
+ strings_only)
+ if hasattr(param, 'input_size'):
+ # If parameter has `input_size` attribute, use that.
+ self.input_size = param.input_size
+ elif isinstance(param, six.string_types) and len(param) > 4000:
+ # Mark any string param greater than 4000 characters as a CLOB.
+ self.input_size = Database.CLOB
+ else:
+ self.input_size = None
+
+
+class VariableWrapper(object):
+ """
+ An adapter class for cursor variables that prevents the wrapped object
+ from being converted into a string when used to instantiate an OracleParam.
+ This can be used generally for any other object that should be passed into
+ Cursor.execute as-is.
+ """
+
+ def __init__(self, var):
+ self.var = var
+
+ def bind_parameter(self, cursor):
+ return self.var
+
+ def __getattr__(self, key):
+ return getattr(self.var, key)
+
+ def __setattr__(self, key, value):
+ if key == 'var':
+ self.__dict__[key] = value
+ else:
+ setattr(self.var, key, value)
+
+
+class InsertIdVar(object):
+ """
+ A late-binding cursor variable that can be passed to Cursor.execute
+ as a parameter, in order to receive the id of the row created by an
+ insert statement.
+ """
+
+ def bind_parameter(self, cursor):
+ param = cursor.cursor.var(Database.NUMBER)
+ cursor._insert_id_var = param
+ return param
+
+
+class FormatStylePlaceholderCursor(object):
+ """
+ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
+ style. This fixes it -- but note that if you want to use a literal "%s" in
+ a query, you'll need to use "%%s".
+
+ We also do automatic conversion between Unicode on the Python side and
+ UTF-8 -- for talking to Oracle -- in here.
+ """
+ charset = 'utf-8'
+
+ def __init__(self, connection):
+ self.cursor = connection.cursor()
+ # Necessary to retrieve decimal values without rounding error.
+ self.cursor.numbersAsStrings = True
+ # Default arraysize of 1 is highly sub-optimal.
+ self.cursor.arraysize = 100
+
+ def _format_params(self, params):
+ try:
+ return dict((k,OracleParam(v, self, True)) for k,v in params.items())
+ except AttributeError:
+ return tuple([OracleParam(p, self, True) for p in params])
+
+ def _guess_input_sizes(self, params_list):
+ # Try dict handling; if that fails, treat as sequence
+ if hasattr(params_list[0], 'keys'):
+ sizes = {}
+ for params in params_list:
+ for k, value in params.items():
+ if value.input_size:
+ sizes[k] = value.input_size
+ self.setinputsizes(**sizes)
+ else:
+ # It's not a list of dicts; it's a list of sequences
+ sizes = [None] * len(params_list[0])
+ for params in params_list:
+ for i, value in enumerate(params):
+ if value.input_size:
+ sizes[i] = value.input_size
+ self.setinputsizes(*sizes)
+
+ def _param_generator(self, params):
+ # Try dict handling; if that fails, treat as sequence
+ if hasattr(params, 'items'):
+ return dict((k, v.force_bytes) for k,v in params.items())
+ else:
+ return [p.force_bytes for p in params]
+
+ def _fix_for_params(self, query, params):
+ # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
+ # does want a trailing ';' but not a trailing '/'. However, these
+ # characters must be included in the original query in case the query
+ # is being passed to SQL*Plus.
+ if query.endswith(';') or query.endswith('/'):
+ query = query[:-1]
+ if params is None:
+ params = []
+ query = convert_unicode(query, self.charset)
+ elif hasattr(params, 'keys'):
+ # Handle params as dict
+ args = dict((k, ":%s"%k) for k in params.keys())
+ query = convert_unicode(query % args, self.charset)
+ else:
+ # Handle params as sequence
+ args = [(':arg%d' % i) for i in range(len(params))]
+ query = convert_unicode(query % tuple(args), self.charset)
+ return query, self._format_params(params)
+
+ def execute(self, query, params=None):
+ query, params = self._fix_for_params(query, params)
+ self._guess_input_sizes([params])
+ try:
+ return self.cursor.execute(query, self._param_generator(params))
+ except Database.DatabaseError as e:
+ # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
+ if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def executemany(self, query, params=None):
+ if not params:
+ # No params given, nothing to do
+ return None
+ # uniform treatment for sequences and iterables
+ params_iter = iter(params)
+ query, firstparams = self._fix_for_params(query, next(params_iter))
+ # we build a list of formatted params; as we're going to traverse it
+ # more than once, we can't make it lazy by using a generator
+ formatted = [firstparams]+[self._format_params(p) for p in params_iter]
+ self._guess_input_sizes(formatted)
+ try:
+ return self.cursor.executemany(query,
+ [self._param_generator(p) for p in formatted])
+ except Database.DatabaseError as e:
+ # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
+ if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def fetchone(self):
+ row = self.cursor.fetchone()
+ if row is None:
+ return row
+ return _rowfactory(row, self.cursor)
+
+ def fetchmany(self, size=None):
+ if size is None:
+ size = self.arraysize
+ return tuple([_rowfactory(r, self.cursor)
+ for r in self.cursor.fetchmany(size)])
+
+ def fetchall(self):
+ return tuple([_rowfactory(r, self.cursor)
+ for r in self.cursor.fetchall()])
+
+ def var(self, *args):
+ return VariableWrapper(self.cursor.var(*args))
+
+ def arrayvar(self, *args):
+ return VariableWrapper(self.cursor.arrayvar(*args))
+
+ def __getattr__(self, attr):
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ else:
+ return getattr(self.cursor, attr)
+
+ def __iter__(self):
+ return CursorIterator(self.cursor)
+
+
+class CursorIterator(six.Iterator):
+
+ """Cursor iterator wrapper that invokes our custom row factory."""
+
+ def __init__(self, cursor):
+ self.cursor = cursor
+ self.iter = iter(cursor)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return _rowfactory(next(self.iter), self.cursor)
+
+
+def _rowfactory(row, cursor):
+ # Cast numeric values as the appropriate Python type based upon the
+ # cursor description, and convert strings to unicode.
+ casted = []
+ for value, desc in zip(row, cursor.description):
+ if value is not None and desc[1] is Database.NUMBER:
+ precision, scale = desc[4:6]
+ if scale == -127:
+ if precision == 0:
+ # NUMBER column: decimal-precision floating point
+ # This will normally be an integer from a sequence,
+ # but it could be a decimal value.
+ if '.' in value:
+ value = decimal.Decimal(value)
+ else:
+ value = int(value)
+ else:
+ # FLOAT column: binary-precision floating point.
+ # This comes from FloatField columns.
+ value = float(value)
+ elif precision > 0:
+ # NUMBER(p,s) column: decimal-precision fixed point.
+ # This comes from IntField and DecimalField columns.
+ if scale == 0:
+ value = int(value)
+ else:
+ value = decimal.Decimal(value)
+ elif '.' in value:
+ # No type information. This normally comes from a
+ # mathematical expression in the SELECT list. Guess int
+ # or Decimal based on whether it has a decimal point.
+ value = decimal.Decimal(value)
+ else:
+ value = int(value)
+ # datetimes are returned as TIMESTAMP, except the results
+ # of "dates" queries, which are returned as DATETIME.
+ elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if settings.USE_TZ and value is not None and timezone.is_naive(value):
+ value = value.replace(tzinfo=timezone.utc)
+ elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
+ Database.LONG_STRING):
+ value = to_unicode(value)
+ casted.append(value)
+ return tuple(casted)
+
+
+def to_unicode(s):
+ """
+ Convert strings to Unicode objects (and return all other data types
+ unchanged).
+ """
+ if isinstance(s, six.string_types):
+ return force_text(s)
+ return s
+
+
+def _get_sequence_reset_sql():
+ # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
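+ # The PL/SQL below compares MAX(column) with the sequence's current
+ # value (last_number - cache_size) and calls nextval in a loop until
+ # the sequence has caught up with the rows already in the table.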
+ return """
+DECLARE
+ table_value integer;
+ seq_value integer;
+BEGIN
+ SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
+ SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
+ WHERE sequence_name = '%(sequence)s';
+ WHILE table_value > seq_value LOOP
+ SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
+ END LOOP;
+END;
+/"""
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/client.py b/lib/python2.7/site-packages/django/db/backends/oracle/client.py
new file mode 100644
index 0000000..ccc64eb
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/client.py
@@ -0,0 +1,16 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'sqlplus'
+
+ def runshell(self):
+ conn_string = self.connection._connect_string()
+ args = [self.executable_name, "-L", conn_string]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py b/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py
new file mode 100644
index 0000000..bb8ef59
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py
@@ -0,0 +1,72 @@
+from django.db.models.sql import compiler
+from django.utils.six.moves import zip_longest
+
+
+class SQLCompiler(compiler.SQLCompiler):
+ def resolve_columns(self, row, fields=()):
+ # If this query has limit/offset information, then we expect the
+ # first column to be an extra "_RN" column that we need to throw
+ # away.
+ if self.query.high_mark is not None or self.query.low_mark:
+ rn_offset = 1
+ else:
+ rn_offset = 0
+ index_start = rn_offset + len(self.query.extra_select)
+ values = [self.query.convert_values(v, None, connection=self.connection)
+ for v in row[rn_offset:index_start]]
+ for value, field in zip_longest(row[index_start:], fields):
+ values.append(self.query.convert_values(value, field, connection=self.connection))
+ return tuple(values)
+
+ def as_sql(self, with_limits=True, with_col_aliases=False):
+ """
+ Creates the SQL for this query. Returns the SQL string and list
+ of parameters. This is overridden from the original Query class
+ to handle the additional SQL Oracle requires to emulate LIMIT
+ and OFFSET.
+
+ If 'with_limits' is False, any limit/offset information is not
+ included in the query.
+ """
+ if with_limits and self.query.low_mark == self.query.high_mark:
+ return '', ()
+
+ # The `do_offset` flag indicates whether we need to construct
+ # the SQL needed to use limit/offset with Oracle.
+ do_offset = with_limits and (self.query.high_mark is not None
+ or self.query.low_mark)
+ if not do_offset:
+ sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
+ with_col_aliases=with_col_aliases)
+ else:
+ sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
+ with_col_aliases=True)
+
+ # Wrap the base query in an outer SELECT * with boundaries on
+ # the "_RN" column. This is the canonical way to emulate LIMIT
+ # and OFFSET on Oracle.
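+ # Illustration (hypothetical marks): low_mark=20 and high_mark=30
+ # produce:
+ # SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (<inner>) "_SUB"
+ # WHERE ROWNUM <= 30) WHERE "_RN" > 20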
+ high_where = ''
+ if self.query.high_mark is not None:
+ high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
+ sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
+
+ return sql, params
+
+
+class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
+ pass
+
+class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
+ pass
+
+class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
+ pass
+
+class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
+ pass
+
+class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
+ pass
+
+class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):
+ pass
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/creation.py b/lib/python2.7/site-packages/django/db/backends/oracle/creation.py
new file mode 100644
index 0000000..2f2f391
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/creation.py
@@ -0,0 +1,277 @@
+import sys
+import time
+
+from django.conf import settings
+from django.db.backends.creation import BaseDatabaseCreation
+from django.utils.six.moves import input
+
+TEST_DATABASE_PREFIX = 'test_'
+PASSWORD = 'Im_a_lumberjack'
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated Oracle column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ #
+ # Any format strings starting with "qn_" are quoted before being used in the
+ # output (the "qn_" prefix is stripped before the lookup is performed.
+
+ data_types = {
+ 'AutoField': 'NUMBER(11)',
+ 'BinaryField': 'BLOB',
+ 'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
+ 'CharField': 'NVARCHAR2(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
+ 'DateField': 'DATE',
+ 'DateTimeField': 'TIMESTAMP',
+ 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'NVARCHAR2(%(max_length)s)',
+ 'FilePathField': 'NVARCHAR2(%(max_length)s)',
+ 'FloatField': 'DOUBLE PRECISION',
+ 'IntegerField': 'NUMBER(11)',
+ 'BigIntegerField': 'NUMBER(19)',
+ 'IPAddressField': 'VARCHAR2(15)',
+ 'GenericIPAddressField': 'VARCHAR2(39)',
+ 'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
+ 'OneToOneField': 'NUMBER(11)',
+ 'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
+ 'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
+ 'SlugField': 'NVARCHAR2(%(max_length)s)',
+ 'SmallIntegerField': 'NUMBER(11)',
+ 'TextField': 'NCLOB',
+ 'TimeField': 'TIMESTAMP',
+ 'URLField': 'VARCHAR2(%(max_length)s)',
+ }
+
+ def __init__(self, connection):
+ super(DatabaseCreation, self).__init__(connection)
+
+ def _create_test_db(self, verbosity=1, autoclobber=False):
+ TEST_NAME = self._test_database_name()
+ TEST_USER = self._test_database_user()
+ TEST_PASSWD = self._test_database_passwd()
+ TEST_TBLSPACE = self._test_database_tblspace()
+ TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
+
+ parameters = {
+ 'dbname': TEST_NAME,
+ 'user': TEST_USER,
+ 'password': TEST_PASSWD,
+ 'tblspace': TEST_TBLSPACE,
+ 'tblspace_temp': TEST_TBLSPACE_TMP,
+ }
+
+ cursor = self.connection.cursor()
+ if self._test_database_create():
+ try:
+ self._execute_test_db_creation(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error creating the test database: %s\n" % e)
+ if not autoclobber:
+ confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..." % self.connection.alias)
+ self._execute_test_db_destruction(cursor, parameters, verbosity)
+ self._execute_test_db_creation(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error recreating the test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ if self._test_user_create():
+ if verbosity >= 1:
+ print("Creating test user...")
+ try:
+ self._create_test_user(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error creating the test user: %s\n" % e)
+ if not autoclobber:
+ confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test user...")
+ self._destroy_test_user(cursor, parameters, verbosity)
+ if verbosity >= 1:
+ print("Creating test user...")
+ self._create_test_user(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error recreating the test user: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ real_settings = settings.DATABASES[self.connection.alias]
+ real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
+ real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
+ real_settings['TEST_USER'] = real_settings['USER'] = self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
+ real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
+
+ return self.connection.settings_dict['NAME']
+
+ def _destroy_test_db(self, test_database_name, verbosity=1):
+ """
+ Destroy a test database, dropping the test user and the test
+ tablespaces that were created for it.
+ """
+ TEST_NAME = self._test_database_name()
+ TEST_USER = self._test_database_user()
+ TEST_PASSWD = self._test_database_passwd()
+ TEST_TBLSPACE = self._test_database_tblspace()
+ TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
+
+ self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
+ self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
+
+ parameters = {
+ 'dbname': TEST_NAME,
+ 'user': TEST_USER,
+ 'password': TEST_PASSWD,
+ 'tblspace': TEST_TBLSPACE,
+ 'tblspace_temp': TEST_TBLSPACE_TMP,
+ }
+
+ cursor = self.connection.cursor()
+ time.sleep(1) # To avoid "database is being accessed by other users" errors.
+ if self._test_user_create():
+ if verbosity >= 1:
+ print('Destroying test user...')
+ self._destroy_test_user(cursor, parameters, verbosity)
+ if self._test_database_create():
+ if verbosity >= 1:
+ print('Destroying test database tables...')
+ self._execute_test_db_destruction(cursor, parameters, verbosity)
+ self.connection.close()
+
+ def _execute_test_db_creation(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_create_test_db(): dbname = %s" % parameters['dbname'])
+ statements = [
+ """CREATE TABLESPACE %(tblspace)s
+ DATAFILE '%(tblspace)s.dbf' SIZE 20M
+ REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
+ """,
+ """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
+ TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
+ REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
+ """,
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _create_test_user(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_create_test_user(): username = %s" % parameters['user'])
+ statements = [
+ """CREATE USER %(user)s
+ IDENTIFIED BY %(password)s
+ DEFAULT TABLESPACE %(tblspace)s
+ TEMPORARY TABLESPACE %(tblspace_temp)s
+ QUOTA UNLIMITED ON %(tblspace)s
+ """,
+ """GRANT CONNECT, RESOURCE TO %(user)s""",
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _execute_test_db_destruction(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
+ statements = [
+ 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
+ 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _destroy_test_user(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_destroy_test_user(): user=%s" % parameters['user'])
+ print("Be patient. This can take some time...")
+ statements = [
+ 'DROP USER %(user)s CASCADE',
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _execute_statements(self, cursor, statements, parameters, verbosity):
+ for template in statements:
+ stmt = template % parameters
+ if verbosity >= 2:
+ print(stmt)
+ try:
+ cursor.execute(stmt)
+ except Exception as err:
+ sys.stderr.write("Failed (%s)\n" % (err))
+ raise
+
+ def _test_database_name(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
+ try:
+ if self.connection.settings_dict['TEST_NAME']:
+ name = self.connection.settings_dict['TEST_NAME']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_create(self):
+ return self.connection.settings_dict.get('TEST_CREATE', True)
+
+ def _test_user_create(self):
+ return self.connection.settings_dict.get('TEST_USER_CREATE', True)
+
+ def _test_database_user(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
+ try:
+ if self.connection.settings_dict['TEST_USER']:
+ name = self.connection.settings_dict['TEST_USER']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_passwd(self):
+ name = PASSWORD
+ try:
+ if self.connection.settings_dict['TEST_PASSWD']:
+ name = self.connection.settings_dict['TEST_PASSWD']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_tblspace(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
+ try:
+ if self.connection.settings_dict['TEST_TBLSPACE']:
+ name = self.connection.settings_dict['TEST_TBLSPACE']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_tblspace_tmp(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
+ try:
+ if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
+ name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
+ except KeyError:
+ pass
+ return name
+
+ def _get_test_db_name(self):
+ """
+ We need to return the 'production' DB name to get the test DB creation
+ machinery to work. This isn't a problem in this case, because database
+ names as handled by Django don't have real counterparts in Oracle.
+ """
+ return self.connection.settings_dict['NAME']
+
+ def test_db_signature(self):
+ settings_dict = self.connection.settings_dict
+ return (
+ settings_dict['HOST'],
+ settings_dict['PORT'],
+ settings_dict['ENGINE'],
+ settings_dict['NAME'],
+ self._test_database_user(),
+ )
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py b/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py
new file mode 100644
index 0000000..3ea3a08
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py
@@ -0,0 +1,138 @@
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+import cx_Oracle
+import re
+
+foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ # Maps type objects to Django Field types.
+ data_types_reverse = {
+ cx_Oracle.BLOB: 'BinaryField',
+ cx_Oracle.CLOB: 'TextField',
+ cx_Oracle.DATETIME: 'DateField',
+ cx_Oracle.FIXED_CHAR: 'CharField',
+ cx_Oracle.NCLOB: 'TextField',
+ cx_Oracle.NUMBER: 'DecimalField',
+ cx_Oracle.STRING: 'CharField',
+ cx_Oracle.TIMESTAMP: 'DateTimeField',
+ }
+
+ try:
+ data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
+ except AttributeError:
+ pass
+
+ try:
+ data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
+ except AttributeError:
+ pass
+
+ def get_field_type(self, data_type, description):
+ # If it's a NUMBER with scale == 0, consider it an IntegerField
+ if data_type == cx_Oracle.NUMBER:
+ precision, scale = description[4:6]
+ if scale == 0:
+ if precision > 11:
+ return 'BigIntegerField'
+ elif precision == 1:
+ return 'BooleanField'
+ else:
+ return 'IntegerField'
+ elif scale == -127:
+ return 'FloatField'
+
+ return super(DatabaseIntrospection, self).get_field_type(data_type, description)
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("SELECT TABLE_NAME FROM USER_TABLES")
+ return [row[0].lower() for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ cursor.execute("SELECT * FROM %s WHERE ROWNUM < 2" % self.connection.ops.quote_name(table_name))
+ description = []
+ for desc in cursor.description:
+ name = force_text(desc[0]) # cx_Oracle always returns a 'str' on both Python 2 and 3
+ name = name % {} # cx_Oracle, for some reason, doubles percent signs.
+ description.append(FieldInfo(*(name.lower(),) + desc[1:]))
+ return description
+
+ def table_name_converter(self, name):
+ "Table name comparison is case insensitive under Oracle"
+ return name.lower()
+
+ def _name_to_index(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_name: field_index} for the given table.
+ Indexes are 0-based.
+ """
+ return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ table_name = table_name.upper()
+ cursor.execute("""
+ SELECT ta.column_id - 1, tb.table_name, tb.column_id - 1
+ FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
+ user_tab_cols ta, user_tab_cols tb
+ WHERE user_constraints.table_name = %s AND
+ ta.table_name = user_constraints.table_name AND
+ ta.column_name = ca.column_name AND
+ ca.table_name = ta.table_name AND
+ user_constraints.constraint_name = ca.constraint_name AND
+ user_constraints.r_constraint_name = cb.constraint_name AND
+ cb.table_name = tb.table_name AND
+ cb.column_name = tb.column_name AND
+ ca.position = cb.position""", [table_name])
+
+ relations = {}
+ for row in cursor.fetchall():
+ relations[row[0]] = (row[2], row[1].lower())
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ cursor.execute("""
+ SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
+ FROM user_constraints c
+ JOIN user_cons_columns ccol
+ ON ccol.constraint_name = c.constraint_name
+ JOIN user_cons_columns rcol
+ ON rcol.constraint_name = c.r_constraint_name
+ WHERE c.table_name = %s AND c.constraint_type = 'R'""" , [table_name.upper()])
+ return [tuple(cell.lower() for cell in row)
+ for row in cursor.fetchall()]
+
+ def get_indexes(self, cursor, table_name):
+ sql = """
+ SELECT LOWER(uic1.column_name) AS column_name,
+ CASE user_constraints.constraint_type
+ WHEN 'P' THEN 1 ELSE 0
+ END AS is_primary_key,
+ CASE user_indexes.uniqueness
+ WHEN 'UNIQUE' THEN 1 ELSE 0
+ END AS is_unique
+ FROM user_constraints, user_indexes, user_ind_columns uic1
+ WHERE user_constraints.constraint_type (+) = 'P'
+ AND user_constraints.index_name (+) = uic1.index_name
+ AND user_indexes.uniqueness (+) = 'UNIQUE'
+ AND user_indexes.index_name (+) = uic1.index_name
+ AND uic1.table_name = UPPER(%s)
+ AND uic1.column_position = 1
+ AND NOT EXISTS (
+ SELECT 1
+ FROM user_ind_columns uic2
+ WHERE uic2.index_name = uic1.index_name
+ AND uic2.column_position = 2
+ )
+ """
+ cursor.execute(sql, [table_name])
+ indexes = {}
+ for row in cursor.fetchall():
+ indexes[row[0]] = {'primary_key': bool(row[1]),
+ 'unique': bool(row[2])}
+ return indexes
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py
new file mode 100644
index 0000000..9aa8b47
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py
@@ -0,0 +1,184 @@
+"""
+PostgreSQL database backend for Django.
+
+Requires psycopg 2: http://initd.org/projects/psycopg2
+"""
+
+import sys
+
+from django.db.backends import *
+from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
+from django.db.backends.postgresql_psycopg2.client import DatabaseClient
+from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
+from django.db.backends.postgresql_psycopg2.version import get_version
+from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
+from django.utils.encoding import force_str
+from django.utils.functional import cached_property
+from django.utils.safestring import SafeText, SafeBytes
+from django.utils.timezone import utc
+
+try:
+ import psycopg2 as Database
+ import psycopg2.extensions
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
+psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
+
+def utc_tzinfo_factory(offset):
+ if offset != 0:
+ raise AssertionError("database connection isn't set to UTC")
+ return utc
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ needs_datetime_string_cast = False
+ can_return_id_from_insert = True
+ requires_rollback_on_dirty_transaction = True
+ has_real_datatype = True
+ can_defer_constraint_checks = True
+ has_select_for_update = True
+ has_select_for_update_nowait = True
+ has_bulk_insert = True
+ uses_savepoints = True
+ supports_tablespaces = True
+ supports_transactions = True
+ can_distinct_on_fields = True
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'postgresql'
+ operators = {
+ 'exact': '= %s',
+ 'iexact': '= UPPER(%s)',
+ 'contains': 'LIKE %s',
+ 'icontains': 'LIKE UPPER(%s)',
+ 'regex': '~ %s',
+ 'iregex': '~* %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': 'LIKE %s',
+ 'endswith': 'LIKE %s',
+ 'istartswith': 'LIKE UPPER(%s)',
+ 'iendswith': 'LIKE UPPER(%s)',
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ opts = self.settings_dict["OPTIONS"]
+ RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
+ self.isolation_level = opts.get('isolation_level', RC)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def get_connection_params(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['NAME']:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured(
+ "settings.DATABASES is improperly configured. "
+ "Please supply the NAME value.")
+ conn_params = {
+ 'database': settings_dict['NAME'],
+ }
+ conn_params.update(settings_dict['OPTIONS'])
+ if 'autocommit' in conn_params:
+ del conn_params['autocommit']
+ if 'isolation_level' in conn_params:
+ del conn_params['isolation_level']
+ if settings_dict['USER']:
+ conn_params['user'] = settings_dict['USER']
+ if settings_dict['PASSWORD']:
+ conn_params['password'] = force_str(settings_dict['PASSWORD'])
+ if settings_dict['HOST']:
+ conn_params['host'] = settings_dict['HOST']
+ if settings_dict['PORT']:
+ conn_params['port'] = settings_dict['PORT']
+ return conn_params
+
+ def get_new_connection(self, conn_params):
+ return Database.connect(**conn_params)
+
+ def init_connection_state(self):
+ settings_dict = self.settings_dict
+ self.connection.set_client_encoding('UTF8')
+ tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
+ if tz:
+ try:
+ get_parameter_status = self.connection.get_parameter_status
+ except AttributeError:
+ # psycopg2 < 2.0.12 doesn't have get_parameter_status
+ conn_tz = None
+ else:
+ conn_tz = get_parameter_status('TimeZone')
+
+ if conn_tz != tz:
+ self.connection.cursor().execute(
+ self.ops.set_time_zone_sql(), [tz])
+ # Commit after setting the time zone (see #17062)
+ self.connection.commit()
+ self.connection.set_isolation_level(self.isolation_level)
+
+ def create_cursor(self):
+ cursor = self.connection.cursor()
+ cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
+ return cursor
+
+ def _set_isolation_level(self, isolation_level):
+ assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.set_session(isolation_level=isolation_level)
+ else:
+ self.connection.set_isolation_level(isolation_level)
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.autocommit = autocommit
+ else:
+ if autocommit:
+ level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+ else:
+ level = self.isolation_level
+ self.connection.set_isolation_level(level)
+
+ def check_constraints(self, table_names=None):
+ """
+ To check constraints, we set them to immediate. Then, when we're done,
+ we must ensure they are returned to deferred.
+ """
+ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
+ self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
+
+ def is_usable(self):
+ try:
+ # Use a psycopg cursor directly, bypassing Django's utilities.
+ self.connection.cursor().execute("SELECT 1")
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def psycopg2_version(self):
+ version = psycopg2.__version__.split(' ', 1)[0]
+ return tuple(int(v) for v in version.split('.'))
+
+ @cached_property
+ def pg_version(self):
+ with self.temporary_connection():
+ return get_version(self.connection)
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py
new file mode 100644
index 0000000..a5c0296
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py
@@ -0,0 +1,23 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'psql'
+
+ def runshell(self):
+ settings_dict = self.connection.settings_dict
+ args = [self.executable_name]
+ if settings_dict['USER']:
+ args += ["-U", settings_dict['USER']]
+ if settings_dict['HOST']:
+ args.extend(["-h", settings_dict['HOST']])
+ if settings_dict['PORT']:
+ args.extend(["-p", str(settings_dict['PORT'])])
+ args += [settings_dict['NAME']]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py
new file mode 100644
index 0000000..d4260e0
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py
@@ -0,0 +1,77 @@
+from django.db.backends.creation import BaseDatabaseCreation
+from django.db.backends.util import truncate_name
+
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated PostgreSQL column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ data_types = {
+ 'AutoField': 'serial',
+ 'BinaryField': 'bytea',
+ 'BooleanField': 'boolean',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'timestamp with time zone',
+ 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'double precision',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'inet',
+ 'GenericIPAddressField': 'inet',
+ 'NullBooleanField': 'boolean',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
+ 'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'text',
+ 'TimeField': 'time',
+ }
+
+ def sql_table_creation_suffix(self):
+ assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
+ if self.connection.settings_dict['TEST_CHARSET']:
+ return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
+ return ''
+
+ def sql_indexes_for_field(self, model, f, style):
+ output = []
+ if f.db_index or f.unique:
+ qn = self.connection.ops.quote_name
+ db_table = model._meta.db_table
+ tablespace = f.db_tablespace or model._meta.db_tablespace
+ if tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
+ if tablespace_sql:
+ tablespace_sql = ' ' + tablespace_sql
+ else:
+ tablespace_sql = ''
+
+ def get_index_sql(index_name, opclass=''):
+ return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
+ style.SQL_TABLE(qn(truncate_name(index_name,self.connection.ops.max_name_length()))) + ' ' +
+ style.SQL_KEYWORD('ON') + ' ' +
+ style.SQL_TABLE(qn(db_table)) + ' ' +
+ "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
+ "%s;" % tablespace_sql)
+
+ if not f.unique:
+ output = [get_index_sql('%s_%s' % (db_table, f.column))]
+
+ # Fields with database column types of `varchar` and `text` need
+ # a second index that specifies their operator class, which is
+ # needed when performing correct LIKE queries outside the
+ # C locale. See #12234.
+ db_type = f.db_type(connection=self.connection)
+ if db_type.startswith('varchar'):
+ output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
+ ' varchar_pattern_ops'))
+ elif db_type.startswith('text'):
+ output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
+ ' text_pattern_ops'))
+ return output
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
new file mode 100644
index 0000000..ea4e3e1
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
@@ -0,0 +1,111 @@
+from __future__ import unicode_literals
+
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ # Maps type codes to Django Field types.
+ data_types_reverse = {
+ 16: 'BooleanField',
+ 17: 'BinaryField',
+ 20: 'BigIntegerField',
+ 21: 'SmallIntegerField',
+ 23: 'IntegerField',
+ 25: 'TextField',
+ 700: 'FloatField',
+ 701: 'FloatField',
+ 869: 'GenericIPAddressField',
+ 1042: 'CharField', # blank-padded
+ 1043: 'CharField',
+ 1082: 'DateField',
+ 1083: 'TimeField',
+ 1114: 'DateTimeField',
+ 1184: 'DateTimeField',
+ 1266: 'TimeField',
+ 1700: 'DecimalField',
+ }
+
+ ignored_tables = []
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("""
+ SELECT c.relname
+ FROM pg_catalog.pg_class c
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE c.relkind IN ('r', 'v', '')
+ AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
+ AND pg_catalog.pg_table_is_visible(c.oid)""")
+ return [row[0] for row in cursor.fetchall() if row[0] not in self.ignored_tables]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ # As cursor.description does not reliably return the nullable property,
+ # we have to query the information_schema (#7783)
+ cursor.execute("""
+ SELECT column_name, is_nullable
+ FROM information_schema.columns
+ WHERE table_name = %s""", [table_name])
+ null_map = dict(cursor.fetchall())
+ cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
+ return [FieldInfo(*((force_text(line[0]),) + line[1:6] + (null_map[force_text(line[0])]=='YES',)))
+ for line in cursor.description]
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ cursor.execute("""
+ SELECT con.conkey, con.confkey, c2.relname
+ FROM pg_constraint con, pg_class c1, pg_class c2
+ WHERE c1.oid = con.conrelid
+ AND c2.oid = con.confrelid
+ AND c1.relname = %s
+ AND con.contype = 'f'""", [table_name])
+ relations = {}
+ for row in cursor.fetchall():
+ # row[0] and row[1] are single-item lists, so grab the single item.
+ relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ key_columns = []
+ cursor.execute("""
+ SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
+ FROM information_schema.constraint_column_usage ccu
+ LEFT JOIN information_schema.key_column_usage kcu
+ ON ccu.constraint_catalog = kcu.constraint_catalog
+ AND ccu.constraint_schema = kcu.constraint_schema
+ AND ccu.constraint_name = kcu.constraint_name
+ LEFT JOIN information_schema.table_constraints tc
+ ON ccu.constraint_catalog = tc.constraint_catalog
+ AND ccu.constraint_schema = tc.constraint_schema
+ AND ccu.constraint_name = tc.constraint_name
+ WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""" , [table_name])
+ key_columns.extend(cursor.fetchall())
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ # This query retrieves each index on the given table, including the
+ # first associated field name
+ cursor.execute("""
+ SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
+ FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
+ pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
+ WHERE c.oid = idx.indrelid
+ AND idx.indexrelid = c2.oid
+ AND attr.attrelid = c.oid
+ AND attr.attnum = idx.indkey[0]
+ AND c.relname = %s""", [table_name])
+ indexes = {}
+ for row in cursor.fetchall():
+ # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
+ # a string of space-separated integers. This designates the field
+ # indexes (1-based) of the fields that have indexes on the table.
+ # Here, we skip any indexes across multiple fields.
+ if ' ' in row[1]:
+ continue
+ indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
+ return indexes
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py
new file mode 100644
index 0000000..c5aab84
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py
@@ -0,0 +1,222 @@
+from __future__ import unicode_literals
+
+from django.conf import settings
+from django.db.backends import BaseDatabaseOperations
+
+
+class DatabaseOperations(BaseDatabaseOperations):
+ def __init__(self, connection):
+ super(DatabaseOperations, self).__init__(connection)
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
+ if lookup_type == 'week_day':
+ # For consistency across backends, we return Sunday=1, Saturday=7.
+ return "EXTRACT('dow' FROM %s) + 1" % field_name
+ else:
+ return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements the interval functionality for expressions.
+ The PostgreSQL format is:
+ (datefield + interval '3 days 200 seconds 5 microseconds')
+ """
+ modifiers = []
+ if timedelta.days:
+ modifiers.append('%s days' % timedelta.days)
+ if timedelta.seconds:
+ modifiers.append('%s seconds' % timedelta.seconds)
+ if timedelta.microseconds:
+ modifiers.append('%s microseconds' % timedelta.microseconds)
+ mods = ' '.join(modifiers)
+ conn = ' %s ' % connector
+ return '(%s)' % conn.join([sql, 'interval \'%s\'' % mods])
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
+ return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
+
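+ # Sketch of the expected result (hypothetical column and tz): with
+ # USE_TZ enabled, datetime_extract_sql('year', 'created', 'UTC')
+ # returns ("EXTRACT('year' FROM created AT TIME ZONE %s)", ['UTC']).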
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "%s AT TIME ZONE %%s" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
+ if lookup_type == 'week_day':
+ # For consistency across backends, we return Sunday=1, Saturday=7.
+ sql = "EXTRACT('dow' FROM %s) + 1" % field_name
+ else:
+ sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
+ return sql, params
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "%s AT TIME ZONE %%s" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
+ sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
+ return sql, params
+
+ def deferrable_sql(self):
+ return " DEFERRABLE INITIALLY DEFERRED"
+
+ def lookup_cast(self, lookup_type):
+ lookup = '%s'
+
+ # Cast text lookups to text to allow things like filter(x__contains=4)
+ if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
+ 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
+ lookup = "%s::text"
+
+ # Use UPPER(x) for case-insensitive lookups; it's faster.
+ if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
+ lookup = 'UPPER(%s)' % lookup
+
+ return lookup
+
+ def field_cast_sql(self, db_type, internal_type):
+ if internal_type == "GenericIPAddressField" or internal_type == "IPAddressField":
+ return 'HOST(%s)'
+ return '%s'
+
+ def last_insert_id(self, cursor, table_name, pk_name):
+ # Use pg_get_serial_sequence to get the underlying sequence name
+ # from the table name and column name (available since PostgreSQL 8)
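+ # e.g. for a hypothetical table "app_item" with pk "id", this runs:
+ # SELECT CURRVAL(pg_get_serial_sequence('"app_item"','id'))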
+ cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
+ self.quote_name(table_name), pk_name))
+ return cursor.fetchone()[0]
+
+ def no_limit_value(self):
+ return None
+
+ def quote_name(self, name):
+ if name.startswith('"') and name.endswith('"'):
+ return name # Quoting once is enough.
+ return '"%s"' % name
+
+ def set_time_zone_sql(self):
+ return "SET TIME ZONE %s"
+
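+ # Example output (hypothetical table names): flushing tables 'app_a'
+ # and 'app_b' with allow_cascade=True yields
+ # ['TRUNCATE "app_a", "app_b" CASCADE;'] plus any sequence resets.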
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ if tables:
+ # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
+ # us to truncate tables referenced by a foreign key in any other
+ # table.
+ tables_sql = ', '.join(
+ style.SQL_FIELD(self.quote_name(table)) for table in tables)
+ if allow_cascade:
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ tables_sql,
+ style.SQL_KEYWORD('CASCADE'),
+ )]
+ else:
+ sql = ['%s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ tables_sql,
+ )]
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ # Generates "SELECT setval(pg_get_serial_sequence('table','id'), 1, false);"
+ # style SQL statements to reset sequence indices.
+ sql = []
+ for sequence_info in sequences:
+ table_name = sequence_info['table']
+ column_name = sequence_info['column']
+ if not column_name:
+ # This will be the case if it's an m2m using an autogenerated
+ # intermediate table (see BaseDatabaseIntrospection.sequence_list)
+ column_name = 'id'
+ sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(self.quote_name(table_name)),
+ style.SQL_FIELD(column_name))
+ )
+ return sql
+
+ def tablespace_sql(self, tablespace, inline=False):
+ if inline:
+ return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
+ else:
+ return "TABLESPACE %s" % self.quote_name(tablespace)
+
+ def sequence_reset_sql(self, style, model_list):
+ from django.db import models
+ output = []
+ qn = self.quote_name
+ for model in model_list:
+ # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
+ # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
+ # if there are records (as the max pk value is already in use), otherwise set it to false.
+ # Use pg_get_serial_sequence to get the underlying sequence name from the table name
+ # and column name (available since PostgreSQL 8)
+
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(qn(model._meta.db_table)),
+ style.SQL_FIELD(f.column),
+ style.SQL_FIELD(qn(f.column)),
+ style.SQL_FIELD(qn(f.column)),
+ style.SQL_KEYWORD('IS NOT'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_TABLE(qn(model._meta.db_table))))
+ break # Only one AutoField is allowed per model, so don't bother continuing.
+ for f in model._meta.many_to_many:
+ if not f.rel.through:
+ output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(qn(f.m2m_db_table())),
+ style.SQL_FIELD('id'),
+ style.SQL_FIELD(qn('id')),
+ style.SQL_FIELD(qn('id')),
+ style.SQL_KEYWORD('IS NOT'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_TABLE(qn(f.m2m_db_table()))))
+ return output
+
+ def prep_for_iexact_query(self, x):
+ return x
+
+ def max_name_length(self):
+ """
+ Returns the maximum length of an identifier.
+
+ Note that the maximum length of an identifier is 63 by default, but can
+ be changed by recompiling PostgreSQL after editing the NAMEDATALEN
+ macro in src/include/pg_config_manual.h .
+
+ This implementation simply returns 63, but can easily be overridden by a
+ custom database backend that inherits most of its behavior from this one.
+ """
+
+ return 63
+
+ def distinct_sql(self, fields):
+ if fields:
+ return 'DISTINCT ON (%s)' % ', '.join(fields)
+ else:
+ return 'DISTINCT'
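+ # Illustrative: distinct_sql(['"name"', '"email"']) returns
+ # 'DISTINCT ON ("name", "email")'; with no fields it is plain 'DISTINCT'.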
+
+ def last_executed_query(self, cursor, sql, params):
+ # http://initd.org/psycopg/docs/cursor.html#cursor.query
+ # The query attribute is a Psycopg extension to the DB API 2.0.
+ if cursor.query is not None:
+ return cursor.query.decode('utf-8')
+ return None
+
+ def return_insert_id(self):
+ return "RETURNING %s", ()
+
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
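+ # Illustrative: two fields and num_values=3 produce
+ # 'VALUES (%s, %s), (%s, %s), (%s, %s)'.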
+ return "VALUES " + ", ".join([items_sql] * num_values)
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py
new file mode 100644
index 0000000..8ef5167
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py
@@ -0,0 +1,43 @@
+"""
+Extracts the version of the PostgreSQL server.
+"""
+
+import re
+
+# This regex is intentionally fairly flexible. It needs to be able to
+# handle version strings like:
+# PostgreSQL 8.3.6
+# EnterpriseDB 8.3
+# PostgreSQL 8.3 beta4
+# PostgreSQL 8.4beta1
+VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
+
+
+def _parse_version(text):
+ "Internal parsing method. Factored out for testing purposes."
+ major, major2, minor = VERSION_RE.search(text).groups()
+ try:
+ return int(major) * 10000 + int(major2) * 100 + int(minor)
+ except (ValueError, TypeError):
+ return int(major) * 10000 + int(major2) * 100
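+# Illustrative results for _parse_version:
+#   _parse_version('PostgreSQL 8.3.6')    -> 80306
+#   _parse_version('PostgreSQL 9.1beta2') -> 90100 (a missing minor counts as 00)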
+
+def get_version(connection):
+ """
+ Returns an integer representing the major, minor and revision number of the
+ server. Format is the one used for the return value of libpq
+ PQServerVersion()/``server_version`` connection attribute (available in
+ newer psycopg2 versions.)
+
+ For example, 80304 for 8.3.4. The last two digits will be 00 in the case
+ of releases (e.g., 80400 for 'PostgreSQL 8.4') and of betas and
+ prereleases (e.g., 90100 for 'PostgreSQL 9.1beta2').
+
+ PQServerVersion()/``server_version`` doesn't execute a query, so try that
+ first, then fall back to a ``SELECT version()`` query.
+ """
+ if hasattr(connection, 'server_version'):
+ return connection.server_version
+ else:
+ cursor = connection.cursor()
+ cursor.execute("SELECT version()")
+ return _parse_version(cursor.fetchone()[0])
diff --git a/lib/python2.7/site-packages/django/db/backends/signals.py b/lib/python2.7/site-packages/django/db/backends/signals.py
new file mode 100644
index 0000000..c16a63f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/signals.py
@@ -0,0 +1,3 @@
+from django.dispatch import Signal
+
+connection_created = Signal(providing_args=["connection"])
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py
new file mode 100644
index 0000000..a219178
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py
@@ -0,0 +1,533 @@
+"""
+SQLite3 backend for django.
+
+Works with either the pysqlite2 module or the sqlite3 module in the
+standard library.
+"""
+from __future__ import unicode_literals
+
+import datetime
+import decimal
+import warnings
+import re
+
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.sqlite3.client import DatabaseClient
+from django.db.backends.sqlite3.creation import DatabaseCreation
+from django.db.backends.sqlite3.introspection import DatabaseIntrospection
+from django.db.models import fields
+from django.db.models.sql import aggregates
+from django.utils.dateparse import parse_date, parse_datetime, parse_time
+from django.utils.encoding import force_text
+from django.utils.functional import cached_property
+from django.utils.safestring import SafeBytes
+from django.utils import six
+from django.utils import timezone
+
+try:
+ try:
+ from pysqlite2 import dbapi2 as Database
+ except ImportError:
+ from sqlite3 import dbapi2 as Database
+except ImportError as exc:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+def parse_datetime_with_timezone_support(value):
+ dt = parse_datetime(value)
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+
+def adapt_datetime_with_timezone_support(value):
+ # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
+ if settings.USE_TZ:
+ if timezone.is_naive(value):
+ warnings.warn("SQLite received a naive datetime (%s)"
+ " while time zone support is active." % value,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ return value.isoformat(str(" "))
+
+def decoder(conv_func):
+ """ The Python sqlite3 interface returns always byte strings.
+ This function converts the received value to a regular string before
+ passing it to the receiver function.
+ """
+ return lambda s: conv_func(s.decode('utf-8'))
+
+Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
+Database.register_converter(str("time"), decoder(parse_time))
+Database.register_converter(str("date"), decoder(parse_date))
+Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("decimal"), decoder(util.typecast_decimal))
+
+Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
+Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
+if six.PY2 and Database.version_info >= (2, 4, 1):
+ # Starting in 2.4.1, the str type is not accepted anymore; therefore,
+ # we convert all str objects to Unicode.
+ # As registering an adapter for a primitive type causes a small
+ # slow-down, this adapter is only registered for sqlite3 versions
+ # needing it (Python 2.6 and up).
+ Database.register_adapter(str, lambda s: s.decode('utf-8'))
+ Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ # SQLite cannot handle a cursor's result set being read only partially
+ # while the same rows are written back through another cursor. This
+ # setting ensures we always read result sets fully into memory in one go.
+ can_use_chunked_reads = False
+ test_db_allows_multiple_connections = False
+ supports_unspecified_pk = True
+ supports_timezones = False
+ supports_1000_query_parameters = False
+ supports_mixed_date_datetime_comparisons = False
+ has_bulk_insert = True
+ can_combine_inserts_with_and_without_auto_increment_pk = False
+ autocommits_when_autocommit_is_off = True
+ atomic_transactions = False
+ supports_paramstyle_pyformat = False
+
+ @cached_property
+ def uses_savepoints(self):
+ return Database.sqlite_version_info >= (3, 6, 8)
+
+ @cached_property
+ def supports_stddev(self):
+ """Confirm support for STDDEV and related stats functions
+
+ SQLite supports STDDEV as an extension package, so
+ connection.ops.check_aggregate_support() can't unilaterally
+ rule out support for STDDEV. We need to manually check
+ whether the call works.
+ """
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
+ try:
+ cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
+ has_support = True
+ except utils.DatabaseError:
+ has_support = False
+ cursor.execute('DROP TABLE STDDEV_TEST')
+ return has_support
+
+ @cached_property
+ def has_zoneinfo_database(self):
+ return pytz is not None
+
+class DatabaseOperations(BaseDatabaseOperations):
+ def bulk_batch_size(self, fields, objs):
+ """
+ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
+ 999 variables per query.
+
+ If there is just a single field to insert, then we can hit another
+ limit, SQLITE_MAX_COMPOUND_SELECT, which defaults to 500.
+ """
+ limit = 999 if len(fields) > 1 else 500
+ return (limit // len(fields)) if len(fields) > 0 else len(objs)
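+ # Illustrative: with 3 fields per row the batch size is 999 // 3 == 333
+ # rows per query; with a single field it is 500 // 1 == 500.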
+
+ def check_aggregate_support(self, aggregate):
+ bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
+ bad_aggregates = (aggregates.Sum, aggregates.Avg,
+ aggregates.Variance, aggregates.StdDev)
+ if (isinstance(aggregate.source, bad_fields) and
+ isinstance(aggregate, bad_aggregates)):
+ raise NotImplementedError(
+ 'You cannot use Sum, Avg, StdDev and Variance aggregations '
+ 'on date/time fields in sqlite3 '
+ 'since date/time is saved as text.')
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # sqlite doesn't support extract, so we fake it with the user-defined
+ # function django_date_extract that's registered in connect(). Note that
+ # single quotes are used because this is a string (and could otherwise
+ # cause a collision with a field name).
+ return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ # It would be more straightforward if we could use the sqlite strftime
+ # function, but it does not allow for keeping six digits of fractional
+ # second information, nor does it allow for formatting date and datetime
+ # values differently. So instead we register our own function that
+ # formats the datetime combined with the delta in a manner suitable
+ # for comparisons.
+ return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
+ connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
+ # function django_date_trunc that's registered in connect(). Note that
+ # single quotes are used because this is a string (and could otherwise
+ # cause a collision with a field name).
+ return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ # Same comment as in date_extract_sql.
+ if settings.USE_TZ:
+ if pytz is None:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("This query requires pytz, "
+ "but it isn't installed.")
+ return "django_datetime_extract('%s', %s, %%s)" % (
+ lookup_type.lower(), field_name), [tzname]
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ # Same comment as in date_trunc_sql.
+ if settings.USE_TZ:
+ if pytz is None:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("This query requires pytz, "
+ "but it isn't installed.")
+ return "django_datetime_trunc('%s', %s, %%s)" % (
+ lookup_type.lower(), field_name), [tzname]
+
+ def drop_foreignkey_sql(self):
+ return ""
+
+ def pk_default_value(self):
+ return "NULL"
+
+ def quote_name(self, name):
+ if name.startswith('"') and name.endswith('"'):
+ return name # Quoting once is enough.
+ return '"%s"' % name
+
+ def no_limit_value(self):
+ return -1
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # NB: The DELETE FROM... SQL generated below is specific to SQLite;
+ # it works because constraints aren't enforced there.
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('DELETE'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_FIELD(self.quote_name(table))
+ ) for table in tables]
+ # Note: No need to reset auto-incremented indices (cf. other
+ # sql_flush() implementations). Just return the SQL at this point.
+ return sql
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # SQLite doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ # SQLite doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ raise ValueError("SQLite backend does not support timezone-aware times.")
+
+ return six.text_type(value)
+
+ def convert_values(self, value, field):
+ """SQLite returns floats when it should be returning decimals,
+ and gets dates and datetimes wrong.
+ For consistency with other backends, coerce when required.
+ """
+ if value is None:
+ return None
+
+ internal_type = field.get_internal_type()
+ if internal_type == 'DecimalField':
+ return util.typecast_decimal(field.format_number(value))
+ elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
+ return int(value)
+ elif internal_type == 'DateField':
+ return parse_date(value)
+ elif internal_type == 'DateTimeField':
+ return parse_datetime_with_timezone_support(value)
+ elif internal_type == 'TimeField':
+ return parse_time(value)
+
+ # No field, or the field isn't known to be a decimal or integer
+ return value
+
+ def bulk_insert_sql(self, fields, num_values):
+ res = []
+ res.append("SELECT %s" % ", ".join(
+ "%%s AS %s" % self.quote_name(f.column) for f in fields
+ ))
+ res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
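+ # Illustrative: two fields (a, b) and num_values=2 yield
+ # 'SELECT %s AS "a", %s AS "b" UNION ALL SELECT %s, %s' -- the
+ # compound-SELECT form of a multi-row insert, which predates SQLite's
+ # native multi-row VALUES support (added in 3.7.11).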
+ return " ".join(res)
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'sqlite'
+ # SQLite requires LIKE statements to include an ESCAPE clause if the value
+ # being escaped has a percent or underscore in it.
+ # See http://www.sqlite.org/lang_expr.html for an explanation.
+ operators = {
+ 'exact': '= %s',
+ 'iexact': "LIKE %s ESCAPE '\\'",
+ 'contains': "LIKE %s ESCAPE '\\'",
+ 'icontains': "LIKE %s ESCAPE '\\'",
+ 'regex': 'REGEXP %s',
+ 'iregex': "REGEXP '(?i)' || %s",
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': "LIKE %s ESCAPE '\\'",
+ 'endswith': "LIKE %s ESCAPE '\\'",
+ 'istartswith': "LIKE %s ESCAPE '\\'",
+ 'iendswith': "LIKE %s ESCAPE '\\'",
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def get_connection_params(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['NAME']:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured(
+ "settings.DATABASES is improperly configured. "
+ "Please supply the NAME value.")
+ kwargs = {
+ 'database': settings_dict['NAME'],
+ 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
+ }
+ kwargs.update(settings_dict['OPTIONS'])
+ # Always allow the underlying SQLite connection to be shareable
+ # between multiple threads. The safe-guarding will be handled at a
+ # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
+ # property. This is necessary as the shareability is disabled by
+ # default in pysqlite and it cannot be changed once a connection is
+ # opened.
+ if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
+ warnings.warn(
+ 'The `check_same_thread` option was provided and set to '
+ 'True. It will be overridden with False. Use the '
+ '`DatabaseWrapper.allow_thread_sharing` property instead '
+ 'for controlling thread shareability.',
+ RuntimeWarning
+ )
+ kwargs.update({'check_same_thread': False})
+ return kwargs
+
+ def get_new_connection(self, conn_params):
+ conn = Database.connect(**conn_params)
+ conn.create_function("django_date_extract", 2, _sqlite_date_extract)
+ conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
+ conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
+ conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
+ conn.create_function("regexp", 2, _sqlite_regexp)
+ conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
+ return conn
+
+ def init_connection_state(self):
+ pass
+
+ def create_cursor(self):
+ return self.connection.cursor(factory=SQLiteCursorWrapper)
+
+ def close(self):
+ self.validate_thread_sharing()
+ # If database is in memory, closing the connection destroys the
+ # database. To prevent accidental data loss, ignore close requests on
+ # an in-memory db.
+ if self.settings_dict['NAME'] != ":memory:":
+ BaseDatabaseWrapper.close(self)
+
+ def _savepoint_allowed(self):
+ # Two conditions are required here:
+ # - A sufficiently recent version of SQLite to support savepoints,
+ # - Being in a transaction, which can only happen inside 'atomic'.
+
+ # When 'isolation_level' is not None, sqlite3 commits before each
+ # savepoint; it's a bug. When it is None, savepoints don't make sense
+ # because autocommit is enabled. The only exception is inside 'atomic'
+ # blocks. To work around that bug, on SQLite, 'atomic' starts a
+ # transaction explicitly rather than simply disable autocommit.
+ return self.features.uses_savepoints and self.in_atomic_block
+
+ def _set_autocommit(self, autocommit):
+ if autocommit:
+ level = None
+ else:
+ # sqlite3's internal default is ''. It's different from None.
+ # See Modules/_sqlite/connection.c.
+ level = ''
+ # 'isolation_level' is a misleading API.
+ # SQLite always runs at the SERIALIZABLE isolation level.
+ with self.wrap_database_errors:
+ self.connection.isolation_level = level
+
+ def check_constraints(self, table_names=None):
+ """
+ Checks each table name in `table_names` for rows with invalid foreign key references. This method is
+ intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
+ determine if rows with invalid references were entered while constraint checks were off.
+
+ Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
+ detailed information about the invalid reference in the error message.
+
+ Backends can override this method if they can more directly apply
+ constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE").
+ """
+ cursor = self.cursor()
+ if table_names is None:
+ table_names = self.introspection.table_names(cursor)
+ for table_name in table_names:
+ primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
+ if not primary_key_column_name:
+ continue
+ key_columns = self.introspection.get_key_columns(cursor, table_name)
+ for column_name, referenced_table_name, referenced_column_name in key_columns:
+ cursor.execute("""
+ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
+ LEFT JOIN `%s` as REFERRED
+ ON (REFERRING.`%s` = REFERRED.`%s`)
+ WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
+ % (primary_key_column_name, column_name, table_name, referenced_table_name,
+ column_name, referenced_column_name, column_name, referenced_column_name))
+ for bad_row in cursor.fetchall():
+ raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
+ "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
+ % (table_name, bad_row[0], table_name, column_name, bad_row[1],
+ referenced_table_name, referenced_column_name))
+
+ def is_usable(self):
+ return True
+
+ def _start_transaction_under_autocommit(self):
+ """
+ Start a transaction explicitly in autocommit mode.
+
+ Staying in autocommit mode works around a bug of sqlite3 that breaks
+ savepoints when autocommit is disabled.
+ """
+ self.cursor().execute("BEGIN")
+
+FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
+
+class SQLiteCursorWrapper(Database.Cursor):
+ """
+ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
+ This fixes it -- but note that if you want to use a literal "%s" in a query,
+ you'll need to use "%%s".
+ """
+ def execute(self, query, params=None):
+ if params is None:
+ return Database.Cursor.execute(self, query)
+ query = self.convert_query(query)
+ return Database.Cursor.execute(self, query, params)
+
+ def executemany(self, query, param_list):
+ query = self.convert_query(query)
+ return Database.Cursor.executemany(self, query, param_list)
+
+ def convert_query(self, query):
+ return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
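+ # Illustrative: convert_query("UPDATE t SET x = %s WHERE n LIKE '10%%'")
+ # returns "UPDATE t SET x = ? WHERE n LIKE '10%'" -- placeholders become
+ # qmarks and doubled percent signs collapse, per the class docstring.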
+
+def _sqlite_date_extract(lookup_type, dt):
+ if dt is None:
+ return None
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if lookup_type == 'week_day':
+ return (dt.isoweekday() % 7) + 1
+ else:
+ return getattr(dt, lookup_type)
+
+def _sqlite_date_trunc(lookup_type, dt):
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if lookup_type == 'year':
+ return "%i-01-01" % dt.year
+ elif lookup_type == 'month':
+ return "%i-%02i-01" % (dt.year, dt.month)
+ elif lookup_type == 'day':
+ return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
+
+def _sqlite_datetime_extract(lookup_type, dt, tzname):
+ if dt is None:
+ return None
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if tzname is not None:
+ dt = timezone.localtime(dt, pytz.timezone(tzname))
+ if lookup_type == 'week_day':
+ return (dt.isoweekday() % 7) + 1
+ else:
+ return getattr(dt, lookup_type)
+
+def _sqlite_datetime_trunc(lookup_type, dt, tzname):
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if tzname is not None:
+ dt = timezone.localtime(dt, pytz.timezone(tzname))
+ if lookup_type == 'year':
+ return "%i-01-01 00:00:00" % dt.year
+ elif lookup_type == 'month':
+ return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
+ elif lookup_type == 'day':
+ return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
+ elif lookup_type == 'hour':
+ return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
+ elif lookup_type == 'minute':
+ return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
+ elif lookup_type == 'second':
+ return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+
+def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
+ try:
+ dt = util.typecast_timestamp(dt)
+ delta = datetime.timedelta(int(days), int(secs), int(usecs))
+ if conn.strip() == '+':
+ dt = dt + delta
+ else:
+ dt = dt - delta
+ except (ValueError, TypeError):
+ return None
+ # typecast_timestamp returns a date or a datetime without timezone.
+ # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
+ return str(dt)
+
+def _sqlite_regexp(re_pattern, re_string):
+ return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py
new file mode 100644
index 0000000..5b5b732
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py
@@ -0,0 +1,16 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'sqlite3'
+
+ def runshell(self):
+ args = [self.executable_name,
+ self.connection.settings_dict['NAME']]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py
new file mode 100644
index 0000000..a9fb273
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py
@@ -0,0 +1,88 @@
+import os
+import sys
+from django.db.backends.creation import BaseDatabaseCreation
+from django.utils.six.moves import input
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # SQLite doesn't actually support most of these types, but it "does the right
+ # thing" given more verbose field definitions, so leave them as is so that
+ # schema inspection is more useful.
+ data_types = {
+ 'AutoField': 'integer',
+ 'BinaryField': 'BLOB',
+ 'BooleanField': 'bool',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'datetime',
+ 'DecimalField': 'decimal',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'real',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'char(15)',
+ 'GenericIPAddressField': 'char(39)',
+ 'NullBooleanField': 'bool',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer unsigned',
+ 'PositiveSmallIntegerField': 'smallint unsigned',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'text',
+ 'TimeField': 'time',
+ }
+
+ def sql_for_pending_references(self, model, style, pending_references):
+ "SQLite3 doesn't support constraints"
+ return []
+
+ def sql_remove_table_constraints(self, model, references_to_delete, style):
+ "SQLite3 doesn't support constraints"
+ return []
+
+ def _get_test_db_name(self):
+ test_database_name = self.connection.settings_dict['TEST_NAME']
+ if test_database_name and test_database_name != ':memory:':
+ return test_database_name
+ return ':memory:'
+
+ def _create_test_db(self, verbosity, autoclobber):
+ test_database_name = self._get_test_db_name()
+ if test_database_name != ':memory:':
+ # Erase the old test database
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..." % self.connection.alias)
+ if os.access(test_database_name, os.F_OK):
+ if not autoclobber:
+ confirm = input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
+ if autoclobber or confirm == 'yes':
+ try:
+ os.remove(test_database_name)
+ except Exception as e:
+ sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+ return test_database_name
+
+ def _destroy_test_db(self, test_database_name, verbosity):
+ if test_database_name and test_database_name != ":memory:":
+ # Remove the SQLite database file
+ os.remove(test_database_name)
+
+ def test_db_signature(self):
+ """
+ Returns a tuple that uniquely identifies a test database.
+
+ This takes into account the special cases of ":memory:" and "" for
+ SQLite since the databases will be distinct despite having the same
+ TEST_NAME. See http://www.sqlite.org/inmemorydb.html
+ """
+ settings_dict = self.connection.settings_dict
+ test_dbname = self._get_test_db_name()
+ sig = [settings_dict['NAME']]
+ if test_dbname == ':memory:':
+ sig.append(self.connection.alias)
+ return tuple(sig)
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py
new file mode 100644
index 0000000..431e112
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py
@@ -0,0 +1,185 @@
+import re
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+
+field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
+
+def get_field_size(name):
+ """ Extract the size number from a "varchar(11)" type name """
+ m = field_size_re.search(name)
+ return int(m.group(1)) if m else None
+
+
+# This light wrapper "fakes" a dictionary interface, because some SQLite data
+# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
+# as a simple dictionary lookup.
+class FlexibleFieldLookupDict(object):
+ # Maps SQL types to Django Field types. Some of the SQL types have multiple
+ # entries here because SQLite allows for anything and doesn't normalize the
+ # field type; it uses whatever was given.
+ base_data_types_reverse = {
+ 'bool': 'BooleanField',
+ 'boolean': 'BooleanField',
+ 'smallint': 'SmallIntegerField',
+ 'smallint unsigned': 'PositiveSmallIntegerField',
+ 'smallinteger': 'SmallIntegerField',
+ 'int': 'IntegerField',
+ 'integer': 'IntegerField',
+ 'bigint': 'BigIntegerField',
+ 'integer unsigned': 'PositiveIntegerField',
+ 'decimal': 'DecimalField',
+ 'real': 'FloatField',
+ 'text': 'TextField',
+ 'char': 'CharField',
+ 'blob': 'BinaryField',
+ 'date': 'DateField',
+ 'datetime': 'DateTimeField',
+ 'time': 'TimeField',
+ }
+
+ def __getitem__(self, key):
+ key = key.lower()
+ try:
+ return self.base_data_types_reverse[key]
+ except KeyError:
+ size = get_field_size(key)
+ if size is not None:
+ return ('CharField', {'max_length': size})
+ raise KeyError
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ data_types_reverse = FlexibleFieldLookupDict()
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ # Skip the sqlite_sequence system table used for autoincrement key
+ # generation.
+ cursor.execute("""
+ SELECT name FROM sqlite_master
+ WHERE type='table' AND NOT name='sqlite_sequence'
+ ORDER BY name""")
+ return [row[0] for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
+ info['null_ok']) for info in self._table_info(cursor, table_name)]
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+
+ # Dictionary of relations to return
+ relations = {}
+
+ # Schema for this table
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+
+ # Walk through and look for references to other tables. SQLite doesn't
+ # really have enforced references, but since it echoes out the SQL used
+ # to create the table we can look for REFERENCES statements used there.
+ for field_index, field_desc in enumerate(results.split(',')):
+ field_desc = field_desc.strip()
+ if field_desc.startswith("UNIQUE"):
+ continue
+
+ m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
+ if not m:
+ continue
+
+ table, column = [s.strip('"') for s in m.groups()]
+
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
+ result = cursor.fetchall()[0]
+ other_table_results = result[0].strip()
+ li, ri = other_table_results.index('('), other_table_results.rindex(')')
+ other_table_results = other_table_results[li+1:ri]
+
+ for other_index, other_desc in enumerate(other_table_results.split(',')):
+ other_desc = other_desc.strip()
+ if other_desc.startswith('UNIQUE'):
+ continue
+
+ name = other_desc.split(' ', 1)[0].strip('"')
+ if name == column:
+ relations[field_index] = (other_index, table)
+ break
+
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
+ key columns in given table.
+ """
+ key_columns = []
+
+ # Schema for this table
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+
+ # Walk through and look for references to other tables. SQLite doesn't
+ # really have enforced references, but since it echoes out the SQL used
+ # to create the table we can look for REFERENCES statements used there.
+ for field_index, field_desc in enumerate(results.split(',')):
+ field_desc = field_desc.strip()
+ if field_desc.startswith("UNIQUE"):
+ continue
+
+ m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
+ if not m:
+ continue
+
+ # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
+ key_columns.append(tuple([s.strip('"') for s in m.groups()]))
+
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ indexes = {}
+ for info in self._table_info(cursor, table_name):
+ if info['pk'] != 0:
+ indexes[info['name']] = {'primary_key': True,
+ 'unique': False}
+ cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
+ # seq, name, unique
+ for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
+ cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
+ info = cursor.fetchall()
+ # Skip indexes across multiple fields
+ if len(info) != 1:
+ continue
+ name = info[0][2] # seqno, cid, name
+ indexes[name] = {'primary_key': False,
+ 'unique': unique}
+ return indexes
+
+ def get_primary_key_column(self, cursor, table_name):
+ """
+ Get the column name of the primary key for the given table.
+ """
+ # Don't use PRAGMA because that causes issues with some transactions
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+ for field_desc in results.split(','):
+ field_desc = field_desc.strip()
+ m = re.search('"(.*)".*PRIMARY KEY$', field_desc)
+ if m:
+ return m.groups()[0]
+ return None
+
+ def _table_info(self, cursor, name):
+ cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
+ # cid, name, type, notnull, dflt_value, pk
+ return [{'name': field[1],
+ 'type': field[2],
+ 'size': get_field_size(field[2]),
+ 'null_ok': not field[3],
+ 'pk': field[5] # undocumented
+ } for field in cursor.fetchall()]
diff --git a/lib/python2.7/site-packages/django/db/backends/util.py b/lib/python2.7/site-packages/django/db/backends/util.py
new file mode 100644
index 0000000..2820007
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/util.py
@@ -0,0 +1,179 @@
+from __future__ import unicode_literals
+
+import datetime
+import decimal
+import hashlib
+import logging
+from time import time
+
+from django.conf import settings
+from django.utils.encoding import force_bytes
+from django.utils.timezone import utc
+
+
+logger = logging.getLogger('django.db.backends')
+
+
+class CursorWrapper(object):
+ def __init__(self, cursor, db):
+ self.cursor = cursor
+ self.db = db
+
+ WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
+
+ def __getattr__(self, attr):
+ cursor_attr = getattr(self.cursor, attr)
+ if attr in CursorWrapper.WRAP_ERROR_ATTRS:
+ return self.db.wrap_database_errors(cursor_attr)
+ else:
+ return cursor_attr
+
+ def __iter__(self):
+ return iter(self.cursor)
+
+ # The following methods cannot be implemented in __getattr__, because the
+ # code must run when the method is invoked, not just when it is accessed.
+
+ def callproc(self, procname, params=None):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ if params is None:
+ return self.cursor.callproc(procname)
+ else:
+ return self.cursor.callproc(procname, params)
+
+ def execute(self, sql, params=None):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ if params is None:
+ return self.cursor.execute(sql)
+ else:
+ return self.cursor.execute(sql, params)
+
+ def executemany(self, sql, param_list):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ return self.cursor.executemany(sql, param_list)
+
+
+class CursorDebugWrapper(CursorWrapper):
+
+ # XXX callproc isn't instrumented at this time.
+
+ def execute(self, sql, params=None):
+ start = time()
+ try:
+ return super(CursorDebugWrapper, self).execute(sql, params)
+ finally:
+ stop = time()
+ duration = stop - start
+ sql = self.db.ops.last_executed_query(self.cursor, sql, params)
+ self.db.queries.append({
+ 'sql': sql,
+ 'time': "%.3f" % duration,
+ })
+ logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
+ extra={'duration': duration, 'sql': sql, 'params': params}
+ )
+
+ def executemany(self, sql, param_list):
+ start = time()
+ try:
+ return super(CursorDebugWrapper, self).executemany(sql, param_list)
+ finally:
+ stop = time()
+ duration = stop - start
+ try:
+ times = len(param_list)
+ except TypeError: # param_list could be an iterator
+ times = '?'
+ self.db.queries.append({
+ 'sql': '%s times: %s' % (times, sql),
+ 'time': "%.3f" % duration,
+ })
+ logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
+ extra={'duration': duration, 'sql': sql, 'params': param_list}
+ )
+
+
+###############################################
+# Converters from database (string) to Python #
+###############################################
+
+def typecast_date(s):
+ return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
+
+def typecast_time(s): # does NOT store time zone information
+ if not s: return None
+ hour, minutes, seconds = s.split(':')
+ if '.' in seconds: # check whether seconds have a fractional part
+ seconds, microseconds = seconds.split('.')
+ else:
+ microseconds = '0'
+ return datetime.time(int(hour), int(minutes), int(seconds), int(float('.'+microseconds) * 1000000))
+
+def typecast_timestamp(s): # does NOT store time zone information
+ # "2005-07-29 15:48:00.590358-05"
+ # "2005-07-29 09:56:00-05"
+ if not s: return None
+ if ' ' not in s: return typecast_date(s)
+ d, t = s.split()
+ # Extract timezone information, if it exists. Currently we just throw
+ # it away, but in the future we may make use of it.
+ if '-' in t:
+ t, tz = t.split('-', 1)
+ tz = '-' + tz
+ elif '+' in t:
+ t, tz = t.split('+', 1)
+ tz = '+' + tz
+ else:
+ tz = ''
+ dates = d.split('-')
+ times = t.split(':')
+ seconds = times[2]
+ if '.' in seconds: # check whether seconds have a fractional part
+ seconds, microseconds = seconds.split('.')
+ else:
+ microseconds = '0'
+ tzinfo = utc if settings.USE_TZ else None
+ return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
+ int(times[0]), int(times[1]), int(seconds),
+ int((microseconds + '000000')[:6]), tzinfo)
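+# Illustrative: typecast_timestamp('2005-07-29 09:56:00-05') returns
+# datetime.datetime(2005, 7, 29, 9, 56) -- the '-05' offset is split off and
+# discarded; tzinfo is UTC only when settings.USE_TZ is True.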
+
+def typecast_decimal(s):
+ if s is None or s == '':
+ return None
+ return decimal.Decimal(s)
+
+###############################################
+# Converters from Python to database (string) #
+###############################################
+
+def rev_typecast_decimal(d):
+ if d is None:
+ return None
+ return str(d)
+
+def truncate_name(name, length=None, hash_len=4):
+ """Shortens a string to a repeatable mangled version with the given length.
+ """
+ if length is None or len(name) <= length:
+ return name
+
+ hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
+ return '%s%s' % (name[:length-hash_len], hsh)
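+# Illustrative: truncate_name('a_table_with_a_long_name', 10) keeps the first
+# 6 characters and appends a 4-character md5 suffix -- the result is always
+# 10 characters and stable for equal inputs.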
+
+def format_number(value, max_digits, decimal_places):
+ """
+ Formats a number into a string with the requisite number of digits and
+ decimal places.
+ """
+ if isinstance(value, decimal.Decimal):
+ context = decimal.getcontext().copy()
+ context.prec = max_digits
+ return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
+ else:
+ return "%.*f" % (decimal_places, value)