author     ttt 2017-05-13 00:29:47 +0530
committer  ttt 2017-05-13 00:29:47 +0530
commit     abf599be33b383a6a5baf9493093b2126a622ac8 (patch)
tree       4c5ab6e0d935d5e65fabcf0258e4a00dd20a5afa /lib/python2.7/site-packages/django/db
added all server files
Diffstat (limited to 'lib/python2.7/site-packages/django/db')
-rw-r--r--  lib/python2.7/site-packages/django/db/__init__.py  96
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/__init__.py  1356
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/creation.py  489
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/dummy/__init__.py  0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/dummy/base.py  73
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/__init__.py  0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/base.py  533
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/client.py  40
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/compiler.py  37
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/creation.py  70
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/introspection.py  119
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/mysql/validation.py  16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/__init__.py  0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/base.py  961
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/client.py  16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/compiler.py  72
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/creation.py  277
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/oracle/introspection.py  138
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py  0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py  184
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py  23
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py  77
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py  111
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py  222
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py  43
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/signals.py  3
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py  0
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/base.py  533
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/client.py  16
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py  88
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py  185
-rw-r--r--  lib/python2.7/site-packages/django/db/backends/util.py  179
-rw-r--r--  lib/python2.7/site-packages/django/db/models/__init__.py  33
-rw-r--r--  lib/python2.7/site-packages/django/db/models/aggregates.py  80
-rw-r--r--  lib/python2.7/site-packages/django/db/models/base.py  1042
-rw-r--r--  lib/python2.7/site-packages/django/db/models/constants.py  6
-rw-r--r--  lib/python2.7/site-packages/django/db/models/deletion.py  292
-rw-r--r--  lib/python2.7/site-packages/django/db/models/expressions.py  186
-rw-r--r--  lib/python2.7/site-packages/django/db/models/fields/__init__.py  1438
-rw-r--r--  lib/python2.7/site-packages/django/db/models/fields/files.py  397
-rw-r--r--  lib/python2.7/site-packages/django/db/models/fields/proxy.py  17
-rw-r--r--  lib/python2.7/site-packages/django/db/models/fields/related.py  1545
-rw-r--r--  lib/python2.7/site-packages/django/db/models/fields/subclassing.py  53
-rw-r--r--  lib/python2.7/site-packages/django/db/models/loading.py  327
-rw-r--r--  lib/python2.7/site-packages/django/db/models/manager.py  283
-rw-r--r--  lib/python2.7/site-packages/django/db/models/options.py  589
-rw-r--r--  lib/python2.7/site-packages/django/db/models/query.py  1731
-rw-r--r--  lib/python2.7/site-packages/django/db/models/query_utils.py  205
-rw-r--r--  lib/python2.7/site-packages/django/db/models/related.py  67
-rw-r--r--  lib/python2.7/site-packages/django/db/models/signals.py  18
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/__init__.py  9
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/aggregates.py  125
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/compiler.py  1128
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/constants.py  41
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/datastructures.py  62
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/expressions.py  117
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/query.py  1922
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/subqueries.py  297
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/where.py  419
-rw-r--r--  lib/python2.7/site-packages/django/db/transaction.py  541
-rw-r--r--  lib/python2.7/site-packages/django/db/utils.py  282
61 files changed, 19209 insertions, 0 deletions
diff --git a/lib/python2.7/site-packages/django/db/__init__.py b/lib/python2.7/site-packages/django/db/__init__.py
new file mode 100644
index 0000000..2421dde
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/__init__.py
@@ -0,0 +1,96 @@
+import warnings
+
+from django.core import signals
+from django.db.utils import (DEFAULT_DB_ALIAS,
+ DataError, OperationalError, IntegrityError, InternalError,
+ ProgrammingError, NotSupportedError, DatabaseError,
+ InterfaceError, Error,
+ load_backend, ConnectionHandler, ConnectionRouter)
+from django.utils.functional import cached_property
+
+__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
+ 'IntegrityError', 'DEFAULT_DB_ALIAS')
+
+connections = ConnectionHandler()
+
+router = ConnectionRouter()
+
+# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
+# for backend bits.
+
+# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
+# we manually create the dictionary from the settings, passing only the
+# settings that the database backends care about. Note that TIME_ZONE is used
+# by the PostgreSQL backends.
+# We load all these up for backwards compatibility, you should use
+# connections['default'] instead.
+class DefaultConnectionProxy(object):
+ """
+ Proxy for accessing the default DatabaseWrapper object's attributes. If you
+ need to access the DatabaseWrapper object itself, use
+ connections[DEFAULT_DB_ALIAS] instead.
+ """
+ def __getattr__(self, item):
+ return getattr(connections[DEFAULT_DB_ALIAS], item)
+
+ def __setattr__(self, name, value):
+ return setattr(connections[DEFAULT_DB_ALIAS], name, value)
+
+ def __delattr__(self, name):
+ return delattr(connections[DEFAULT_DB_ALIAS], name)
+
+connection = DefaultConnectionProxy()
+
+class DefaultBackendProxy(object):
+ """
+ Temporary proxy class used during deprecation period of the `backend` module
+ variable.
+ """
+ @cached_property
+ def _backend(self):
+ warnings.warn("Accessing django.db.backend is deprecated.",
+ PendingDeprecationWarning, stacklevel=2)
+ return load_backend(connections[DEFAULT_DB_ALIAS].settings_dict['ENGINE'])
+
+ def __getattr__(self, item):
+ return getattr(self._backend, item)
+
+ def __setattr__(self, name, value):
+ return setattr(self._backend, name, value)
+
+ def __delattr__(self, name):
+ return delattr(self._backend, name)
+
+backend = DefaultBackendProxy()
+
+def close_connection(**kwargs):
+ warnings.warn(
+ "close_connection is superseded by close_old_connections.",
+ PendingDeprecationWarning, stacklevel=2)
+ # Avoid circular imports
+ from django.db import transaction
+ for conn in connections:
+ # If an error happens here the connection will be left in broken
+ # state. Once a good db connection is again available, the
+ # connection state will be cleaned up.
+ transaction.abort(conn)
+ connections[conn].close()
+
+# Register an event to reset saved queries when a Django request is started.
+def reset_queries(**kwargs):
+ for conn in connections.all():
+ conn.queries = []
+signals.request_started.connect(reset_queries)
+
+# Register an event to reset transaction state and close connections past
+# their lifetime. NB: abort() doesn't do anything outside of a transaction.
+def close_old_connections(**kwargs):
+ for conn in connections.all():
+ # Remove this when the legacy transaction management goes away.
+ try:
+ conn.abort()
+ except DatabaseError:
+ pass
+ conn.close_if_unusable_or_obsolete()
+signals.request_started.connect(close_old_connections)
+signals.request_finished.connect(close_old_connections)
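
[Editor's note: the sketch below is illustrative and not part of the commit. It shows how application code typically reaches the machinery defined in django/db/__init__.py above: `connections` hands out one DatabaseWrapper per alias, `connection` proxies the default alias, and with DEBUG enabled each wrapper collects executed SQL in `queries` until reset_queries() clears it on request_started. The query is a placeholder.]

    from django.db import DEFAULT_DB_ALIAS, connection, connections

    # The proxy and the handler resolve to the same wrapper object.
    assert connection.alias == connections[DEFAULT_DB_ALIAS].alias

    # cursor() opens the underlying DB-API connection lazily on first use.
    cursor = connection.cursor()
    cursor.execute("SELECT 1")  # illustrative query
    row = cursor.fetchone()

    # Note: the module-level `backend` alias above is deprecated; touching
    # it emits a PendingDeprecationWarning. Use connection.ops / features
    # through `connections['default']` instead.
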
diff --git a/lib/python2.7/site-packages/django/db/backends/__init__.py b/lib/python2.7/site-packages/django/db/backends/__init__.py
new file mode 100644
index 0000000..12f08a2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/__init__.py
@@ -0,0 +1,1356 @@
+import datetime
+import time
+
+from django.db.utils import DatabaseError
+
+try:
+ from django.utils.six.moves import _thread as thread
+except ImportError:
+ from django.utils.six.moves import _dummy_thread as thread
+from collections import namedtuple
+from contextlib import contextmanager
+
+from django.conf import settings
+from django.db import DEFAULT_DB_ALIAS
+from django.db.backends.signals import connection_created
+from django.db.backends import util
+from django.db.transaction import TransactionManagementError
+from django.db.utils import DatabaseErrorWrapper
+from django.utils.functional import cached_property
+from django.utils.importlib import import_module
+from django.utils import six
+from django.utils import timezone
+
+
+class BaseDatabaseWrapper(object):
+ """
+ Represents a database connection.
+ """
+ ops = None
+ vendor = 'unknown'
+
+ def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
+ allow_thread_sharing=False):
+ # `settings_dict` should be a dictionary containing keys such as
+ # NAME, USER, etc. It's called `settings_dict` instead of `settings`
+ # to disambiguate it from Django settings modules.
+ self.connection = None
+ self.queries = []
+ self.settings_dict = settings_dict
+ self.alias = alias
+ self.use_debug_cursor = None
+
+ # Savepoint management related attributes
+ self.savepoint_state = 0
+
+ # Transaction management related attributes
+ self.autocommit = False
+ self.transaction_state = []
+ # Tracks if the connection is believed to be in transaction. This is
+ # set somewhat aggressively, as the DBAPI doesn't make it easy to
+ # deduce if the connection is in transaction or not.
+ self._dirty = False
+ # Tracks if the connection is in a transaction managed by 'atomic'.
+ self.in_atomic_block = False
+ # List of savepoints created by 'atomic'
+ self.savepoint_ids = []
+ # Tracks if the outermost 'atomic' block should commit on exit,
+ # ie. if autocommit was active on entry.
+ self.commit_on_exit = True
+ # Tracks if the transaction should be rolled back to the next
+ # available savepoint because of an exception in an inner block.
+ self.needs_rollback = False
+
+ # Connection termination related attributes
+ self.close_at = None
+ self.closed_in_transaction = False
+ self.errors_occurred = False
+
+ # Thread-safety related attributes
+ self.allow_thread_sharing = allow_thread_sharing
+ self._thread_ident = thread.get_ident()
+
+ def __eq__(self, other):
+ return self.alias == other.alias
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ return hash(self.alias)
+
+ ##### Backend-specific methods for creating connections and cursors #####
+
+ def get_connection_params(self):
+ """Returns a dict of parameters suitable for get_new_connection."""
+ raise NotImplementedError
+
+ def get_new_connection(self, conn_params):
+ """Opens a connection to the database."""
+ raise NotImplementedError
+
+ def init_connection_state(self):
+ """Initializes the database connection settings."""
+ raise NotImplementedError
+
+ def create_cursor(self):
+ """Creates a cursor. Assumes that a connection is established."""
+ raise NotImplementedError
+
+ ##### Backend-specific methods for creating connections #####
+
+ def connect(self):
+ """Connects to the database. Assumes that the connection is closed."""
+ # In case the previous connection was closed while in an atomic block
+ self.in_atomic_block = False
+ self.savepoint_ids = []
+ self.needs_rollback = False
+ # Reset parameters defining when to close the connection
+ max_age = self.settings_dict['CONN_MAX_AGE']
+ self.close_at = None if max_age is None else time.time() + max_age
+ self.closed_in_transaction = False
+ self.errors_occurred = False
+ # Establish the connection
+ conn_params = self.get_connection_params()
+ self.connection = self.get_new_connection(conn_params)
+ self.init_connection_state()
+ if self.settings_dict['AUTOCOMMIT']:
+ self.set_autocommit(True)
+ connection_created.send(sender=self.__class__, connection=self)
+
+ def ensure_connection(self):
+ """
+ Guarantees that a connection to the database is established.
+ """
+ if self.connection is None:
+ with self.wrap_database_errors:
+ self.connect()
+
+ ##### Backend-specific wrappers for PEP-249 connection methods #####
+
+ def _cursor(self):
+ self.ensure_connection()
+ with self.wrap_database_errors:
+ return self.create_cursor()
+
+ def _commit(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.commit()
+
+ def _rollback(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.rollback()
+
+ def _close(self):
+ if self.connection is not None:
+ with self.wrap_database_errors:
+ return self.connection.close()
+
+ ##### Generic wrappers for PEP-249 connection methods #####
+
+ def cursor(self):
+ """
+ Creates a cursor, opening a connection if necessary.
+ """
+ self.validate_thread_sharing()
+ if (self.use_debug_cursor or
+ (self.use_debug_cursor is None and settings.DEBUG)):
+ cursor = self.make_debug_cursor(self._cursor())
+ else:
+ cursor = util.CursorWrapper(self._cursor(), self)
+ return cursor
+
+ def commit(self):
+ """
+ Commits a transaction and resets the dirty flag.
+ """
+ self.validate_thread_sharing()
+ self.validate_no_atomic_block()
+ self._commit()
+ self.set_clean()
+
+ def rollback(self):
+ """
+ Rolls back a transaction and resets the dirty flag.
+ """
+ self.validate_thread_sharing()
+ self.validate_no_atomic_block()
+ self._rollback()
+ self.set_clean()
+
+ def close(self):
+ """
+ Closes the connection to the database.
+ """
+ self.validate_thread_sharing()
+ # Don't call validate_no_atomic_block() to avoid making it difficult
+ # to get rid of a connection in an invalid state. The next connect()
+ # will reset the transaction state anyway.
+ try:
+ self._close()
+ finally:
+ if self.in_atomic_block:
+ self.closed_in_transaction = True
+ self.needs_rollback = True
+ else:
+ self.connection = None
+ self.set_clean()
+
+ ##### Backend-specific savepoint management methods #####
+
+ def _savepoint(self, sid):
+ self.cursor().execute(self.ops.savepoint_create_sql(sid))
+
+ def _savepoint_rollback(self, sid):
+ self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
+
+ def _savepoint_commit(self, sid):
+ self.cursor().execute(self.ops.savepoint_commit_sql(sid))
+
+ def _savepoint_allowed(self):
+ # Savepoints cannot be created outside a transaction
+ return self.features.uses_savepoints and not self.get_autocommit()
+
+ ##### Generic savepoint management methods #####
+
+ def savepoint(self):
+ """
+ Creates a savepoint inside the current transaction. Returns an
+ identifier for the savepoint that will be used for the subsequent
+ rollback or commit. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ thread_ident = thread.get_ident()
+ tid = str(thread_ident).replace('-', '')
+
+ self.savepoint_state += 1
+ sid = "s%s_x%d" % (tid, self.savepoint_state)
+
+ self.validate_thread_sharing()
+ self._savepoint(sid)
+
+ return sid
+
+ def savepoint_rollback(self, sid):
+ """
+ Rolls back to a savepoint. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ self.validate_thread_sharing()
+ self._savepoint_rollback(sid)
+
+ def savepoint_commit(self, sid):
+ """
+ Releases a savepoint. Does nothing if savepoints are not supported.
+ """
+ if not self._savepoint_allowed():
+ return
+
+ self.validate_thread_sharing()
+ self._savepoint_commit(sid)
+
+ def clean_savepoints(self):
+ """
+ Resets the counter used to generate unique savepoint ids in this thread.
+ """
+ self.savepoint_state = 0
+
+ ##### Backend-specific transaction management methods #####
+
+ def _set_autocommit(self, autocommit):
+ """
+ Backend-specific implementation to enable or disable autocommit.
+ """
+ raise NotImplementedError
+
+ ##### Generic transaction management methods #####
+
+ def enter_transaction_management(self, managed=True, forced=False):
+ """
+ Enters transaction management for a running thread. It must be balanced with
+ the appropriate leave_transaction_management call, since the actual state is
+ managed as a stack.
+
+ The state and dirty flag are carried over from the surrounding block or
+ from the settings, if there is no surrounding block (dirty is always false
+ when no current block is running).
+
+ If you switch off transaction management and there is a pending
+ commit/rollback, the data will be committed, unless "forced" is True.
+ """
+ self.validate_no_atomic_block()
+
+ self.transaction_state.append(managed)
+
+ if not managed and self.is_dirty() and not forced:
+ self.commit()
+ self.set_clean()
+
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+
+ def leave_transaction_management(self):
+ """
+ Leaves transaction management for a running thread. A dirty flag is carried
+ over to the surrounding block, as a commit will commit all changes, even
+ those from outside. (Commits are on connection level.)
+ """
+ self.validate_no_atomic_block()
+
+ if self.transaction_state:
+ del self.transaction_state[-1]
+ else:
+ raise TransactionManagementError(
+ "This code isn't under transaction management")
+
+ if self.transaction_state:
+ managed = self.transaction_state[-1]
+ else:
+ managed = not self.settings_dict['AUTOCOMMIT']
+
+ if self._dirty:
+ self.rollback()
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+ raise TransactionManagementError(
+ "Transaction managed block ended with pending COMMIT/ROLLBACK")
+
+ if managed == self.get_autocommit():
+ self.set_autocommit(not managed)
+
+ def get_autocommit(self):
+ """
+ Check the autocommit state.
+ """
+ self.ensure_connection()
+ return self.autocommit
+
+ def set_autocommit(self, autocommit):
+ """
+ Enable or disable autocommit.
+ """
+ self.validate_no_atomic_block()
+ self.ensure_connection()
+ self._set_autocommit(autocommit)
+ self.autocommit = autocommit
+
+ def get_rollback(self):
+ """
+ Get the "needs rollback" flag -- for *advanced use* only.
+ """
+ if not self.in_atomic_block:
+ raise TransactionManagementError(
+ "The rollback flag doesn't work outside of an 'atomic' block.")
+ return self.needs_rollback
+
+ def set_rollback(self, rollback):
+ """
+ Set or unset the "needs rollback" flag -- for *advanced use* only.
+ """
+ if not self.in_atomic_block:
+ raise TransactionManagementError(
+ "The rollback flag doesn't work outside of an 'atomic' block.")
+ self.needs_rollback = rollback
+
+ def validate_no_atomic_block(self):
+ """
+ Raise an error if an atomic block is active.
+ """
+ if self.in_atomic_block:
+ raise TransactionManagementError(
+ "This is forbidden when an 'atomic' block is active.")
+
+ def validate_no_broken_transaction(self):
+ if self.needs_rollback:
+ raise TransactionManagementError(
+ "An error occurred in the current transaction. You can't "
+ "execute queries until the end of the 'atomic' block.")
+
+ def abort(self):
+ """
+ Roll back any ongoing transaction and clean the transaction state
+ stack.
+ """
+ if self._dirty:
+ self.rollback()
+ while self.transaction_state:
+ self.leave_transaction_management()
+
+ def is_dirty(self):
+ """
+ Returns True if the current transaction requires a commit for changes to
+ happen.
+ """
+ return self._dirty
+
+ def set_dirty(self):
+ """
+ Sets a dirty flag for the current thread and code streak. This can be
+ used in a managed block of code to decide whether there are open
+ changes waiting for commit.
+ """
+ if not self.get_autocommit():
+ self._dirty = True
+
+ def set_clean(self):
+ """
+ Resets a dirty flag for the current thread and code streak. This can be
+ used in a managed block of code to decide whether a commit or rollback
+ should happen.
+ """
+ self._dirty = False
+ self.clean_savepoints()
+
+ ##### Foreign key constraints checks handling #####
+
+ @contextmanager
+ def constraint_checks_disabled(self):
+ """
+ Context manager that disables foreign key constraint checking.
+ """
+ disabled = self.disable_constraint_checking()
+ try:
+ yield
+ finally:
+ if disabled:
+ self.enable_constraint_checking()
+
+ def disable_constraint_checking(self):
+ """
+ Backends can implement as needed to temporarily disable foreign key
+ constraint checking. Should return True if the constraints were
+ disabled and will need to be reenabled.
+ """
+ return False
+
+ def enable_constraint_checking(self):
+ """
+ Backends can implement as needed to re-enable foreign key constraint
+ checking.
+ """
+ pass
+
+ def check_constraints(self, table_names=None):
+ """
+ Backends can override this method if they can apply constraint
+ checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
+ IntegrityError if any invalid foreign key references are encountered.
+ """
+ pass
+
+ ##### Connection termination handling #####
+
+ def is_usable(self):
+ """
+ Tests if the database connection is usable.
+
+ This function may assume that self.connection is not None.
+
+ Actual implementations should take care not to raise exceptions
+ as that may prevent Django from recycling unusable connections.
+ """
+ raise NotImplementedError(
+ "subclasses of BaseDatabaseWrapper may require an is_usable() method")
+
+ def close_if_unusable_or_obsolete(self):
+ """
+ Closes the current connection if unrecoverable errors have occurred,
+ or if it outlived its maximum age.
+ """
+ if self.connection is not None:
+ # If the application didn't restore the original autocommit setting,
+ # don't take chances, drop the connection.
+ if self.get_autocommit() != self.settings_dict['AUTOCOMMIT']:
+ self.close()
+ return
+
+ if self.errors_occurred:
+ if self.is_usable():
+ self.errors_occurred = False
+ else:
+ self.close()
+ return
+
+ if self.close_at is not None and time.time() >= self.close_at:
+ self.close()
+ return
+
+ ##### Thread safety handling #####
+
+ def validate_thread_sharing(self):
+ """
+ Validates that the connection isn't accessed by another thread than the
+ one which originally created it, unless the connection was explicitly
+ authorized to be shared between threads (via the `allow_thread_sharing`
+ property). Raises an exception if the validation fails.
+ """
+ if not (self.allow_thread_sharing
+ or self._thread_ident == thread.get_ident()):
+ raise DatabaseError("DatabaseWrapper objects created in a "
+ "thread can only be used in that same thread. The object "
+ "with alias '%s' was created in thread id %s and this is "
+ "thread id %s."
+ % (self.alias, self._thread_ident, thread.get_ident()))
+
+ ##### Miscellaneous #####
+
+ @cached_property
+ def wrap_database_errors(self):
+ """
+ Context manager and decorator that re-throws backend-specific database
+ exceptions using Django's common wrappers.
+ """
+ return DatabaseErrorWrapper(self)
+
+ def make_debug_cursor(self, cursor):
+ """
+ Creates a cursor that logs all queries in self.queries.
+ """
+ return util.CursorDebugWrapper(cursor, self)
+
+ @contextmanager
+ def temporary_connection(self):
+ """
+ Context manager that ensures that a connection is established, and
+ if it opened one, closes it to avoid leaving a dangling connection.
+ This is useful for operations outside of the request-response cycle.
+
+ Provides a cursor: with self.temporary_connection() as cursor: ...
+ """
+ must_close = self.connection is None
+ cursor = self.cursor()
+ try:
+ yield cursor
+ finally:
+ cursor.close()
+ if must_close:
+ self.close()
+
+ def _start_transaction_under_autocommit(self):
+ """
+ Only required when autocommits_when_autocommit_is_off = True.
+ """
+ raise NotImplementedError
+
+
+class BaseDatabaseFeatures(object):
+ allows_group_by_pk = False
+ # True if django.db.backends.util.typecast_timestamp is used on values
+ # returned from dates() calls.
+ needs_datetime_string_cast = True
+ empty_fetchmany_value = []
+ update_can_self_select = True
+
+ # Does the backend distinguish between '' and None?
+ interprets_empty_strings_as_nulls = False
+
+ # Does the backend allow inserting duplicate rows when a unique_together
+ # constraint exists, but one of the unique_together columns is NULL?
+ ignores_nulls_in_unique_constraints = True
+
+ can_use_chunked_reads = True
+ can_return_id_from_insert = False
+ has_bulk_insert = False
+ uses_savepoints = False
+ can_combine_inserts_with_and_without_auto_increment_pk = False
+
+ # If True, don't use integer foreign keys referring to, e.g., positive
+ # integer primary keys.
+ related_fields_match_type = False
+ allow_sliced_subqueries = True
+ has_select_for_update = False
+ has_select_for_update_nowait = False
+
+ supports_select_related = True
+
+ # Does the default test database allow multiple connections?
+ # Usually an indication that the test database is in-memory
+ test_db_allows_multiple_connections = True
+
+ # Can an object be saved without an explicit primary key?
+ supports_unspecified_pk = False
+
+ # Can a fixture contain forward references? i.e., are
+ # FK constraints checked at the end of transaction, or
+ # at the end of each save operation?
+ supports_forward_references = True
+
+ # Does a dirty transaction need to be rolled back
+ # before the cursor can be used again?
+ requires_rollback_on_dirty_transaction = False
+
+ # Does the backend allow very long model names without error?
+ supports_long_model_names = True
+
+ # Is there a REAL datatype in addition to floats/doubles?
+ has_real_datatype = False
+ supports_subqueries_in_group_by = True
+ supports_bitwise_or = True
+
+ # Do time/datetime fields have microsecond precision?
+ supports_microsecond_precision = True
+
+ # Does the __regex lookup support backreferencing and grouping?
+ supports_regex_backreferencing = True
+
+ # Can date/datetime lookups be performed using a string?
+ supports_date_lookup_using_string = True
+
+ # Can datetimes with timezones be used?
+ supports_timezones = True
+
+ # Does the database have a copy of the zoneinfo database?
+ has_zoneinfo_database = True
+
+ # When performing a GROUP BY, is an ORDER BY NULL required
+ # to remove any ordering?
+ requires_explicit_null_ordering_when_grouping = False
+
+ # Is there a 1000 item limit on query parameters?
+ supports_1000_query_parameters = True
+
+ # Can an object have a primary key of 0? MySQL says No.
+ allows_primary_key_0 = True
+
+ # Do we need to NULL a ForeignKey out, or can the constraint check be
+ # deferred
+ can_defer_constraint_checks = False
+
+ # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
+ supports_mixed_date_datetime_comparisons = True
+
+ # Does the backend support tablespaces? Default to False because it isn't
+ # in the SQL standard.
+ supports_tablespaces = False
+
+ # Does the backend reset sequences between tests?
+ supports_sequence_reset = True
+
+ # Confirm support for introspected foreign keys
+ # Every database can do this reliably, except MySQL,
+ # which can't do it for MyISAM tables
+ can_introspect_foreign_keys = True
+
+ # Support for the DISTINCT ON clause
+ can_distinct_on_fields = False
+
+ # Does the backend decide to commit before SAVEPOINT statements
+ # when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
+ autocommits_when_autocommit_is_off = False
+
+ # Does the backend prevent running SQL queries in broken transactions?
+ atomic_transactions = True
+
+ # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
+ # parameter passing? Note this can be provided by the backend even if not
+ # supported by the Python driver
+ supports_paramstyle_pyformat = True
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ @cached_property
+ def supports_transactions(self):
+ "Confirm support for transactions"
+ try:
+ # Make sure to run inside a managed transaction block,
+ # otherwise autocommit will cause the confirmation to
+ # fail.
+ self.connection.enter_transaction_management()
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
+ self.connection.commit()
+ cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
+ self.connection.rollback()
+ cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
+ count, = cursor.fetchone()
+ cursor.execute('DROP TABLE ROLLBACK_TEST')
+ self.connection.commit()
+ finally:
+ self.connection.leave_transaction_management()
+ return count == 0
+
+ @cached_property
+ def supports_stddev(self):
+ "Confirm support for STDDEV and related stats functions"
+ class StdDevPop(object):
+ sql_function = 'STDDEV_POP'
+
+ try:
+ self.connection.ops.check_aggregate_support(StdDevPop())
+ return True
+ except NotImplementedError:
+ return False
+
+
+class BaseDatabaseOperations(object):
+ """
+ This class encapsulates all backend-specific differences, such as the way
+ a backend performs ordering or calculates the ID of a recently-inserted
+ row.
+ """
+ compiler_module = "django.db.models.sql.compiler"
+
+ def __init__(self, connection):
+ self.connection = connection
+ self._cache = None
+
+ def autoinc_sql(self, table, column):
+ """
+ Returns any SQL needed to support auto-incrementing primary keys, or
+ None if no SQL is necessary.
+
+ This SQL is executed when a table is created.
+ """
+ return None
+
+ def bulk_batch_size(self, fields, objs):
+ """
+ Returns the maximum allowed batch size for the backend. The fields
+ are the fields going to be inserted in the batch, the objs contains
+ all the objects to be inserted.
+ """
+ return len(objs)
+
+ def cache_key_culling_sql(self):
+ """
+ Returns an SQL query that retrieves the first cache key greater than the
+ n smallest.
+
+ This is used by the 'db' cache backend to determine where to start
+ culling.
+ """
+ return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
+
+ def date_extract_sql(self, lookup_type, field_name):
+ """
+ Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
+ extracts a value from the given date field field_name.
+ """
+ raise NotImplementedError()
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements the date interval functionality for expressions
+ """
+ raise NotImplementedError()
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ """
+ Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
+ truncates the given date field field_name to a date object with only
+ the given specificity.
+ """
+ raise NotImplementedError()
+
+ def datetime_cast_sql(self):
+ """
+ Returns the SQL necessary to cast a datetime value so that it will be
+ retrieved as a Python datetime object instead of a string.
+
+ This SQL should include a '%s' in place of the field's name.
+ """
+ return "%s"
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ """
+ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
+ 'second', returns the SQL that extracts a value from the given
+ datetime field field_name, and a tuple of parameters.
+ """
+ raise NotImplementedError()
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ """
+ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
+ 'second', returns the SQL that truncates the given datetime field
+ field_name to a datetime object with only the given specificity, and
+ a tuple of parameters.
+ """
+ raise NotImplementedError()
+
+ def deferrable_sql(self):
+ """
+ Returns the SQL necessary to make a constraint "initially deferred"
+ during a CREATE TABLE statement.
+ """
+ return ''
+
+ def distinct_sql(self, fields):
+ """
+ Returns an SQL DISTINCT clause which removes duplicate rows from the
+ result set. If any fields are given, only the given fields are being
+ checked for duplicates.
+ """
+ if fields:
+ raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
+ else:
+ return 'DISTINCT'
+
+ def drop_foreignkey_sql(self):
+ """
+ Returns the SQL command that drops a foreign key.
+ """
+ return "DROP CONSTRAINT"
+
+ def drop_sequence_sql(self, table):
+ """
+ Returns any SQL necessary to drop the sequence for the given table.
+ Returns None if no SQL is necessary.
+ """
+ return None
+
+ def fetch_returned_insert_id(self, cursor):
+ """
+ Given a cursor object that has just performed an INSERT...RETURNING
+ statement into a table that has an auto-incrementing ID, returns the
+ newly created ID.
+ """
+ return cursor.fetchone()[0]
+
+ def field_cast_sql(self, db_type, internal_type):
+ """
+ Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
+ (e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
+ before using it in a WHERE statement. Note that the resulting string
+ should contain a '%s' placeholder for the column being searched against.
+ """
+ return '%s'
+
+ def force_no_ordering(self):
+ """
+ Returns a list used in the "ORDER BY" clause to force no ordering at
+ all. Returning an empty list means that nothing will be included in the
+ ordering.
+ """
+ return []
+
+ def for_update_sql(self, nowait=False):
+ """
+ Returns the FOR UPDATE SQL clause to lock rows for an update operation.
+ """
+ if nowait:
+ return 'FOR UPDATE NOWAIT'
+ else:
+ return 'FOR UPDATE'
+
+ def fulltext_search_sql(self, field_name):
+ """
+ Returns the SQL WHERE clause to use in order to perform a full-text
+ search of the given field_name. Note that the resulting string should
+ contain a '%s' placeholder for the value being searched against.
+ """
+ raise NotImplementedError('Full-text search is not implemented for this database backend')
+
+ def last_executed_query(self, cursor, sql, params):
+ """
+ Returns a string of the query last executed by the given cursor, with
+ placeholders replaced with actual values.
+
+ `sql` is the raw query containing placeholders, and `params` is the
+ sequence of parameters. These are used by default, but this method
+ exists for database backends to provide a better implementation
+ according to their own quoting schemes.
+ """
+ from django.utils.encoding import force_text
+
+ # Convert params to contain Unicode values.
+ to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
+ if isinstance(params, (list, tuple)):
+ u_params = tuple(to_unicode(val) for val in params)
+ elif params is None:
+ u_params = ()
+ else:
+ u_params = dict((to_unicode(k), to_unicode(v)) for k, v in params.items())
+
+ return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
+
+ def last_insert_id(self, cursor, table_name, pk_name):
+ """
+ Given a cursor object that has just performed an INSERT statement into
+ a table that has an auto-incrementing ID, returns the newly created ID.
+
+ This method also receives the table name and the name of the primary-key
+ column.
+ """
+ return cursor.lastrowid
+
+ def lookup_cast(self, lookup_type):
+ """
+ Returns the string to use in a query when performing lookups
+ ("contains", "like", etc). The resulting string should contain a '%s'
+ placeholder for the column being searched against.
+ """
+ return "%s"
+
+ def max_in_list_size(self):
+ """
+ Returns the maximum number of items that can be passed in a single 'IN'
+ list condition, or None if the backend does not impose a limit.
+ """
+ return None
+
+ def max_name_length(self):
+ """
+ Returns the maximum length of table and column names, or None if there
+ is no limit.
+ """
+ return None
+
+ def no_limit_value(self):
+ """
+ Returns the value to use for the LIMIT when we want "LIMIT infinity".
+ Returns None if the limit clause can be omitted in this case.
+ """
+ raise NotImplementedError
+
+ def pk_default_value(self):
+ """
+ Returns the value to use during an INSERT statement to specify that
+ the field should use its default value.
+ """
+ return 'DEFAULT'
+
+ def process_clob(self, value):
+ """
+ Returns the value of a CLOB column, for backends that return a locator
+ object that requires additional processing.
+ """
+ return value
+
+ def return_insert_id(self):
+ """
+ For backends that support returning the last insert ID as part
+ of an insert query, this method returns the SQL and params to
+ append to the INSERT query. The returned fragment should
+ contain a format string to hold the appropriate column.
+ """
+ pass
+
+ def compiler(self, compiler_name):
+ """
+ Returns the SQLCompiler class corresponding to the given name,
+ in the namespace corresponding to the `compiler_module` attribute
+ on this backend.
+ """
+ if self._cache is None:
+ self._cache = import_module(self.compiler_module)
+ return getattr(self._cache, compiler_name)
+
+ def quote_name(self, name):
+ """
+ Returns a quoted version of the given table, index or column name. Does
+ not quote the given name if it's already been quoted.
+ """
+ raise NotImplementedError()
+
+ def random_function_sql(self):
+ """
+ Returns an SQL expression that returns a random value.
+ """
+ return 'RANDOM()'
+
+ def regex_lookup(self, lookup_type):
+ """
+ Returns the string to use in a query when performing regular expression
+ lookups (using "regex" or "iregex"). The resulting string should
+ contain a '%s' placeholder for the column being searched against.
+
+ If the feature is not supported (or part of it is not supported), a
+ NotImplementedError exception can be raised.
+ """
+ raise NotImplementedError
+
+ def savepoint_create_sql(self, sid):
+ """
+ Returns the SQL for starting a new savepoint. Only required if the
+ "uses_savepoints" feature is True. The "sid" parameter is a string
+ for the savepoint id.
+ """
+ return "SAVEPOINT %s" % self.quote_name(sid)
+
+ def savepoint_commit_sql(self, sid):
+ """
+ Returns the SQL for committing the given savepoint.
+ """
+ return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
+
+ def savepoint_rollback_sql(self, sid):
+ """
+ Returns the SQL for rolling back the given savepoint.
+ """
+ return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
+
+ def set_time_zone_sql(self):
+ """
+ Returns the SQL that will set the connection's time zone.
+
+ Returns '' if the backend doesn't support time zones.
+ """
+ return ''
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ """
+ Returns a list of SQL statements required to remove all data from
+ the given database tables (without actually removing the tables
+ themselves).
+
+ The returned value also includes SQL statements required to reset DB
+ sequences passed in :param sequences:.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+
+ The `allow_cascade` argument determines whether truncation may cascade
+ to tables with foreign keys pointing to the tables being truncated.
+ PostgreSQL requires a cascade even if these tables are empty.
+ """
+ raise NotImplementedError()
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ """
+ Returns a list of the SQL statements required to reset sequences
+ passed in :param sequences:.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+ """
+ return []
+
+ def sequence_reset_sql(self, style, model_list):
+ """
+ Returns a list of the SQL statements required to reset sequences for
+ the given models.
+
+ The `style` argument is a Style object as returned by either
+ color_style() or no_style() in django.core.management.color.
+ """
+ return [] # No sequence reset required by default.
+
+ def start_transaction_sql(self):
+ """
+ Returns the SQL statement required to start a transaction.
+ """
+ return "BEGIN;"
+
+ def end_transaction_sql(self, success=True):
+ """
+ Returns the SQL statement required to end a transaction.
+ """
+ if not success:
+ return "ROLLBACK;"
+ return "COMMIT;"
+
+ def tablespace_sql(self, tablespace, inline=False):
+ """
+ Returns the SQL that will be used in a query to define the tablespace.
+
+ Returns '' if the backend doesn't support tablespaces.
+
+ If inline is True, the SQL is appended to a row; otherwise it's appended
+ to the entire CREATE TABLE or CREATE INDEX statement.
+ """
+ return ''
+
+ def prep_for_like_query(self, x):
+ """Prepares a value for use in a LIKE query."""
+ from django.utils.encoding import force_text
+ return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
+
+ # Same as prep_for_like_query(), but called for "iexact" matches, which
+ # need not necessarily be implemented using "LIKE" in the backend.
+ prep_for_iexact_query = prep_for_like_query
+
+ def validate_autopk_value(self, value):
+ """
+ Certain backends do not accept some values for "serial" fields
+ (for example zero in MySQL). This method will raise a ValueError
+ if the value is invalid, otherwise returns validated value.
+ """
+ return value
+
+ def value_to_db_date(self, value):
+ """
+ Transform a date value to an object compatible with what is expected
+ by the backend driver for date columns.
+ """
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def value_to_db_datetime(self, value):
+ """
+ Transform a datetime value to an object compatible with what is expected
+ by the backend driver for datetime columns.
+ """
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ """
+ Transform a time value to an object compatible with what is expected
+ by the backend driver for time columns.
+ """
+ if value is None:
+ return None
+ if timezone.is_aware(value):
+ raise ValueError("Django does not support timezone-aware times.")
+ return six.text_type(value)
+
+ def value_to_db_decimal(self, value, max_digits, decimal_places):
+ """
+ Transform a decimal.Decimal value to an object compatible with what is
+ expected by the backend driver for decimal (numeric) columns.
+ """
+ if value is None:
+ return None
+ return util.format_number(value, max_digits, decimal_places)
+
+ def year_lookup_bounds_for_date_field(self, value):
+ """
+ Returns a two-element list with the lower and upper bound to be used
+ with a BETWEEN operator to query a DateField value using a year
+ lookup.
+
+ `value` is an int, containing the looked-up year.
+ """
+ first = datetime.date(value, 1, 1)
+ second = datetime.date(value, 12, 31)
+ return [first, second]
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ """
+ Returns a two-element list with the lower and upper bound to be used
+ with a BETWEEN operator to query a DateTimeField value using a year
+ lookup.
+
+ `value` is an int, containing the looked-up year.
+ """
+ first = datetime.datetime(value, 1, 1)
+ second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
+ if settings.USE_TZ:
+ tz = timezone.get_current_timezone()
+ first = timezone.make_aware(first, tz)
+ second = timezone.make_aware(second, tz)
+ return [first, second]
+
+ def convert_values(self, value, field):
+ """
+ Coerce the value returned by the database backend into a consistent type
+ that is compatible with the field type.
+ """
+ if value is None or field is None:
+ return value
+ internal_type = field.get_internal_type()
+ if internal_type == 'FloatField':
+ return float(value)
+ elif (internal_type and (internal_type.endswith('IntegerField')
+ or internal_type == 'AutoField')):
+ return int(value)
+ return value
+
+ def check_aggregate_support(self, aggregate_func):
+ """Check that the backend supports the provided aggregate
+
+ This is used on specific backends to rule out known aggregates
+ that are known to have faulty implementations. If the named
+ aggregate function has a known problem, the backend should
+ raise NotImplementedError.
+ """
+ pass
+
+ def combine_expression(self, connector, sub_expressions):
+ """Combine a list of subexpressions into a single expression, using
+ the provided connecting operator. This is required because operators
+ can vary between backends (e.g., Oracle with %% and &) and between
+ subexpression types (e.g., date expressions)
+ """
+ conn = ' %s ' % connector
+ return conn.join(sub_expressions)
+
+ def modify_insert_params(self, placeholders, params):
+ """Allow modification of insert parameters. Needed for Oracle Spatial
+ backend due to #10888.
+ """
+ return params
+
+
+# Structure returned by the DB-API cursor.description interface (PEP 249)
+FieldInfo = namedtuple('FieldInfo',
+ 'name type_code display_size internal_size precision scale null_ok'
+)
+
+class BaseDatabaseIntrospection(object):
+ """
+ This class encapsulates all backend-specific introspection utilities
+ """
+ data_types_reverse = {}
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ def get_field_type(self, data_type, description):
+ """Hook for a database backend to use the cursor description to
+ match a Django field type to a database column.
+
+ For Oracle, the column data_type on its own is insufficient to
+ distinguish between a FloatField and IntegerField, for example."""
+ return self.data_types_reverse[data_type]
+
+ def table_name_converter(self, name):
+ """Apply a conversion to the name for the purposes of comparison.
+
+ The default table name converter is for case sensitive comparison.
+ """
+ return name
+
+ def table_names(self, cursor=None):
+ """
+ Returns a list of names of all tables that exist in the database.
+ The returned table list is sorted by Python's default sorting. We
+ do NOT use the database's ORDER BY here to avoid subtle differences
+ in sorting order between databases.
+ """
+ if cursor is None:
+ cursor = self.connection.cursor()
+ return sorted(self.get_table_list(cursor))
+
+ def get_table_list(self, cursor):
+ """
+ Returns an unsorted list of names of all tables that exist in the
+ database.
+ """
+ raise NotImplementedError
+
+ def django_table_names(self, only_existing=False):
+ """
+ Returns a list of all table names that have associated Django models and
+ are in INSTALLED_APPS.
+
+ If only_existing is True, the resulting list will only include the tables
+ that actually exist in the database.
+ """
+ from django.db import models, router
+ tables = set()
+ for app in models.get_apps():
+ for model in models.get_models(app):
+ if not model._meta.managed:
+ continue
+ if not router.allow_syncdb(self.connection.alias, model):
+ continue
+ tables.add(model._meta.db_table)
+ tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
+ tables = list(tables)
+ if only_existing:
+ existing_tables = self.table_names()
+ tables = [
+ t
+ for t in tables
+ if self.table_name_converter(t) in existing_tables
+ ]
+ return tables
+
+ def installed_models(self, tables):
+ "Returns a set of all models represented by the provided list of table names."
+ from django.db import models, router
+ all_models = []
+ for app in models.get_apps():
+ for model in models.get_models(app):
+ if router.allow_syncdb(self.connection.alias, model):
+ all_models.append(model)
+ tables = list(map(self.table_name_converter, tables))
+ return set([
+ m for m in all_models
+ if self.table_name_converter(m._meta.db_table) in tables
+ ])
+
+ def sequence_list(self):
+ "Returns a list of information about all DB sequences for all models in all apps."
+ from django.db import models, router
+
+ apps = models.get_apps()
+ sequence_list = []
+
+ for app in apps:
+ for model in models.get_models(app):
+ if not model._meta.managed:
+ continue
+ if model._meta.swapped:
+ continue
+ if not router.allow_syncdb(self.connection.alias, model):
+ continue
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ sequence_list.append({'table': model._meta.db_table, 'column': f.column})
+ break # Only one AutoField is allowed per model, so don't bother continuing.
+
+ for f in model._meta.local_many_to_many:
+ # If this is an m2m using an intermediate table,
+ # we don't need to reset the sequence.
+ if f.rel.through is None:
+ sequence_list.append({'table': f.m2m_db_table(), 'column': None})
+
+ return sequence_list
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Backends can override this to return a list of (column_name, referenced_table_name,
+ referenced_column_name) for all key columns in the given table.
+ """
+ raise NotImplementedError
+
+ def get_primary_key_column(self, cursor, table_name):
+ """
+ Returns the name of the primary key column for the given table.
+ """
+ for column in six.iteritems(self.get_indexes(cursor, table_name)):
+ if column[1]['primary_key']:
+ return column[0]
+ return None
+
+ def get_indexes(self, cursor, table_name):
+ """
+ Returns a dictionary of indexed fieldname -> infodict for the given
+ table, where each infodict is in the format:
+ {'primary_key': boolean representing whether it's the primary key,
+ 'unique': boolean representing whether it's a unique index}
+
+ Only single-column indexes are introspected.
+ """
+ raise NotImplementedError
+
+
+class BaseDatabaseClient(object):
+ """
+ This class encapsulates all backend-specific methods for opening a
+ client shell.
+ """
+ # This should be a string representing the name of the executable
+ # (e.g., "psql"). Subclasses must override this.
+ executable_name = None
+
+ def __init__(self, connection):
+ # connection is an instance of BaseDatabaseWrapper.
+ self.connection = connection
+
+ def runshell(self):
+ raise NotImplementedError()
+
+
+class BaseDatabaseValidation(object):
+ """
+ This class encapsulates all backend-specific model validation.
+ """
+ def __init__(self, connection):
+ self.connection = connection
+
+ def validate_field(self, errors, opts, f):
+ "By default, there is no backend-specific validation"
+ pass
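
[Editor's note: an illustrative sketch, not part of the commit. In backends/__init__.py above, BaseDatabaseWrapper holds the generic transaction and savepoint state and executes SQL produced by its `ops` (a BaseDatabaseOperations subclass), while `features` advertises what the backend can do. The table name below is hypothetical, and savepoint ids and quoting vary per backend.]

    from django.db import connection

    if connection.features.uses_savepoints:
        connection.set_autocommit(False)        # savepoints require a transaction
        sid = connection.savepoint()            # runs ops.savepoint_create_sql(sid)
        try:
            connection.cursor().execute(
                "UPDATE myapp_item SET flag = 1")  # hypothetical table
        except Exception:
            connection.savepoint_rollback(sid)  # ROLLBACK TO SAVEPOINT ...
        else:
            connection.savepoint_commit(sid)    # RELEASE SAVEPOINT ...
        connection.commit()
        connection.set_autocommit(True)
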
diff --git a/lib/python2.7/site-packages/django/db/backends/creation.py b/lib/python2.7/site-packages/django/db/backends/creation.py
new file mode 100644
index 0000000..bae439b
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/creation.py
@@ -0,0 +1,489 @@
+import hashlib
+import sys
+import time
+import warnings
+
+from django.conf import settings
+from django.db.utils import load_backend
+from django.utils.encoding import force_bytes
+from django.utils.six.moves import input
+
+from .util import truncate_name
+
+# The prefix to put on the default database name when creating
+# the test database.
+TEST_DATABASE_PREFIX = 'test_'
+
+
+class BaseDatabaseCreation(object):
+ """
+ This class encapsulates all backend-specific differences that pertain to
+ database *creation*, such as the column types to use for particular Django
+ Fields, the SQL used to create and destroy tables, and the creation and
+ destruction of test databases.
+ """
+ data_types = {}
+
+ def __init__(self, connection):
+ self.connection = connection
+
+ def _digest(self, *args):
+ """
+ Generates a 32-bit digest of a set of arguments that can be used to
+ shorten identifying names.
+ """
+ h = hashlib.md5()
+ for arg in args:
+ h.update(force_bytes(arg))
+ return h.hexdigest()[:8]
+
+ def sql_create_model(self, model, style, known_models=set()):
+ """
+ Returns the SQL required to create a single model, as a tuple of:
+ (list_of_sql, pending_references_dict)
+ """
+ opts = model._meta
+ if not opts.managed or opts.proxy or opts.swapped:
+ return [], {}
+ final_output = []
+ table_output = []
+ pending_references = {}
+ qn = self.connection.ops.quote_name
+ for f in opts.local_fields:
+ col_type = f.db_type(connection=self.connection)
+ tablespace = f.db_tablespace or opts.db_tablespace
+ if col_type is None:
+ # Skip ManyToManyFields, because they're not represented as
+ # database columns in this table.
+ continue
+ # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
+ field_output = [style.SQL_FIELD(qn(f.column)),
+ style.SQL_COLTYPE(col_type)]
+ # Oracle treats the empty string ('') as null, so coerce the null
+ # option whenever '' is a possible value.
+ null = f.null
+ if (f.empty_strings_allowed and not f.primary_key and
+ self.connection.features.interprets_empty_strings_as_nulls):
+ null = True
+ if not null:
+ field_output.append(style.SQL_KEYWORD('NOT NULL'))
+ if f.primary_key:
+ field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
+ elif f.unique:
+ field_output.append(style.SQL_KEYWORD('UNIQUE'))
+ if tablespace and f.unique:
+ # We must specify the index tablespace inline, because we
+ # won't be generating a CREATE INDEX statement for this field.
+ tablespace_sql = self.connection.ops.tablespace_sql(
+ tablespace, inline=True)
+ if tablespace_sql:
+ field_output.append(tablespace_sql)
+ if f.rel and f.db_constraint:
+ ref_output, pending = self.sql_for_inline_foreign_key_references(
+ model, f, known_models, style)
+ if pending:
+ pending_references.setdefault(f.rel.to, []).append(
+ (model, f))
+ else:
+ field_output.extend(ref_output)
+ table_output.append(' '.join(field_output))
+ for field_constraints in opts.unique_together:
+ table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
+ ", ".join(
+ [style.SQL_FIELD(qn(opts.get_field(f).column))
+ for f in field_constraints]))
+
+ full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
+ style.SQL_TABLE(qn(opts.db_table)) + ' (']
+ for i, line in enumerate(table_output): # Combine and add commas.
+ full_statement.append(
+ ' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
+ full_statement.append(')')
+ if opts.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(
+ opts.db_tablespace)
+ if tablespace_sql:
+ full_statement.append(tablespace_sql)
+ full_statement.append(';')
+ final_output.append('\n'.join(full_statement))
+
+ if opts.has_auto_field:
+ # Add any extra SQL needed to support auto-incrementing primary
+ # keys.
+ auto_column = opts.auto_field.db_column or opts.auto_field.name
+ autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
+ auto_column)
+ if autoinc_sql:
+ for stmt in autoinc_sql:
+ final_output.append(stmt)
+
+ return final_output, pending_references
+
+ def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
+ """
+ Return the SQL snippet defining the foreign key reference for a field.
+ """
+ qn = self.connection.ops.quote_name
+ rel_to = field.rel.to
+ if rel_to in known_models or rel_to == model:
+ output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
+ style.SQL_TABLE(qn(rel_to._meta.db_table)) + ' (' +
+ style.SQL_FIELD(qn(rel_to._meta.get_field(
+ field.rel.field_name).column)) + ')' +
+ self.connection.ops.deferrable_sql()
+ ]
+ pending = False
+ else:
+ # We haven't yet created the table to which this field
+ # is related, so save it for later.
+ output = []
+ pending = True
+
+ return output, pending
+
+ def sql_for_pending_references(self, model, style, pending_references):
+ """
+ Returns any ALTER TABLE statements to add constraints after the fact.
+ """
+ opts = model._meta
+ if not opts.managed or opts.swapped:
+ return []
+ qn = self.connection.ops.quote_name
+ final_output = []
+ if model in pending_references:
+ for rel_class, f in pending_references[model]:
+ rel_opts = rel_class._meta
+ r_table = rel_opts.db_table
+ r_col = f.column
+ table = opts.db_table
+ col = opts.get_field(f.rel.field_name).column
+ # For MySQL, r_name must be unique in the first 64 characters.
+ # So we are careful with character usage here.
+ r_name = '%s_refs_%s_%s' % (
+ r_col, col, self._digest(r_table, table))
+ final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
+ ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
+ (qn(r_table), qn(truncate_name(
+ r_name, self.connection.ops.max_name_length())),
+ qn(r_col), qn(table), qn(col),
+ self.connection.ops.deferrable_sql()))
+ del pending_references[model]
+ return final_output
+
+ def sql_indexes_for_model(self, model, style):
+ """
+ Returns the CREATE INDEX SQL statements for a single model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ for f in model._meta.local_fields:
+ output.extend(self.sql_indexes_for_field(model, f, style))
+ for fs in model._meta.index_together:
+ fields = [model._meta.get_field_by_name(f)[0] for f in fs]
+ output.extend(self.sql_indexes_for_fields(model, fields, style))
+ return output
+
+ def sql_indexes_for_field(self, model, f, style):
+ """
+ Return the CREATE INDEX SQL statements for a single model field.
+ """
+ if f.db_index and not f.unique:
+ return self.sql_indexes_for_fields(model, [f], style)
+ else:
+ return []
+
+ def sql_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ return [
+ style.SQL_KEYWORD("CREATE INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ style.SQL_KEYWORD("ON") + " " +
+ style.SQL_TABLE(qn(model._meta.db_table)) + " " +
+ "(%s)" % style.SQL_FIELD(", ".join(field_names)) +
+ "%s;" % tablespace_sql,
+ ]
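+
+    # For illustration (hypothetical model and digest): a single indexed
+    # column "name" on table "app_author" yields roughly
+    #   CREATE INDEX "app_author_52a96b3e" ON "app_author" ("name");
+    # where the suffix comes from self._digest() over the field names.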
+
+ def sql_destroy_model(self, model, references_to_delete, style):
+ """
+        Return the DROP TABLE and constraint-dropping statements for a single
+ model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ # Drop the table now
+ qn = self.connection.ops.quote_name
+ output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
+ style.SQL_TABLE(qn(model._meta.db_table)))]
+ if model in references_to_delete:
+ output.extend(self.sql_remove_table_constraints(
+ model, references_to_delete, style))
+ if model._meta.has_auto_field:
+ ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
+ if ds:
+ output.append(ds)
+ return output
+
+ def sql_remove_table_constraints(self, model, references_to_delete, style):
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ qn = self.connection.ops.quote_name
+ for rel_class, f in references_to_delete[model]:
+ table = rel_class._meta.db_table
+ col = f.column
+ r_table = model._meta.db_table
+ r_col = model._meta.get_field(f.rel.field_name).column
+ r_name = '%s_refs_%s_%s' % (
+ col, r_col, self._digest(table, r_table))
+        output.append('%s %s %s %s;' %
+ (style.SQL_KEYWORD('ALTER TABLE'),
+ style.SQL_TABLE(qn(table)),
+ style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
+ style.SQL_FIELD(qn(truncate_name(
+ r_name, self.connection.ops.max_name_length())))))
+ del references_to_delete[model]
+ return output
+
+ def sql_destroy_indexes_for_model(self, model, style):
+ """
+ Returns the DROP INDEX SQL statements for a single model.
+ """
+ if not model._meta.managed or model._meta.proxy or model._meta.swapped:
+ return []
+ output = []
+ for f in model._meta.local_fields:
+ output.extend(self.sql_destroy_indexes_for_field(model, f, style))
+ for fs in model._meta.index_together:
+ fields = [model._meta.get_field_by_name(f)[0] for f in fs]
+ output.extend(self.sql_destroy_indexes_for_fields(model, fields, style))
+ return output
+
+ def sql_destroy_indexes_for_field(self, model, f, style):
+ """
+ Return the DROP INDEX SQL statements for a single model field.
+ """
+ if f.db_index and not f.unique:
+ return self.sql_destroy_indexes_for_fields(model, [f], style)
+ else:
+ return []
+
+ def sql_destroy_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ return [
+ style.SQL_KEYWORD("DROP INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ ";",
+ ]
+
+ def create_test_db(self, verbosity=1, autoclobber=False):
+ """
+ Creates a test database, prompting the user for confirmation if the
+ database already exists. Returns the name of the test database created.
+ """
+ # Don't import django.core.management if it isn't needed.
+ from django.core.management import call_command
+
+ test_database_name = self._get_test_db_name()
+
+ if verbosity >= 1:
+ test_db_repr = ''
+ if verbosity >= 2:
+ test_db_repr = " ('%s')" % test_database_name
+ print("Creating test database for alias '%s'%s..." % (
+ self.connection.alias, test_db_repr))
+
+ self._create_test_db(verbosity, autoclobber)
+
+ self.connection.close()
+ settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
+ self.connection.settings_dict["NAME"] = test_database_name
+
+ # Report syncdb messages at one level lower than that requested.
+ # This ensures we don't get flooded with messages during testing
+        # (unless you really ask to be flooded).
+ call_command('syncdb',
+ verbosity=max(verbosity - 1, 0),
+ interactive=False,
+ database=self.connection.alias,
+ load_initial_data=False)
+
+ # We need to then do a flush to ensure that any data installed by
+ # custom SQL has been removed. The only test data should come from
+ # test fixtures, or autogenerated from post_syncdb triggers.
+ # This has the side effect of loading initial data (which was
+ # intentionally skipped in the syncdb).
+ call_command('flush',
+ verbosity=max(verbosity - 1, 0),
+ interactive=False,
+ database=self.connection.alias)
+
+ from django.core.cache import get_cache
+ from django.core.cache.backends.db import BaseDatabaseCache
+ for cache_alias in settings.CACHES:
+ cache = get_cache(cache_alias)
+ if isinstance(cache, BaseDatabaseCache):
+ call_command('createcachetable', cache._table,
+ database=self.connection.alias)
+
+ # Get a cursor (even though we don't need one yet). This has
+ # the side effect of initializing the test database.
+ self.connection.cursor()
+
+ return test_database_name
+
+ def _get_test_db_name(self):
+ """
+ Internal implementation - returns the name of the test DB that will be
+ created. Only useful when called from create_test_db() and
+ _create_test_db() and when no external munging is done with the 'NAME'
+ or 'TEST_NAME' settings.
+ """
+ if self.connection.settings_dict['TEST_NAME']:
+ return self.connection.settings_dict['TEST_NAME']
+ return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
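+
+    # Example: with NAME = 'myproject' and no TEST_NAME setting, the test
+    # database is named 'test_myproject' (TEST_DATABASE_PREFIX is 'test_').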
+
+ def _create_test_db(self, verbosity, autoclobber):
+ """
+ Internal implementation - creates the test db tables.
+ """
+ suffix = self.sql_table_creation_suffix()
+
+ test_database_name = self._get_test_db_name()
+
+ qn = self.connection.ops.quote_name
+
+ # Create the test database and connect to it.
+ cursor = self.connection.cursor()
+ try:
+ cursor.execute(
+ "CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
+ except Exception as e:
+ sys.stderr.write(
+ "Got an error creating the test database: %s\n" % e)
+ if not autoclobber:
+ confirm = input(
+ "Type 'yes' if you would like to try deleting the test "
+ "database '%s', or 'no' to cancel: " % test_database_name)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..."
+ % self.connection.alias)
+ cursor.execute(
+ "DROP DATABASE %s" % qn(test_database_name))
+ cursor.execute(
+ "CREATE DATABASE %s %s" % (qn(test_database_name),
+ suffix))
+ except Exception as e:
+ sys.stderr.write(
+ "Got an error recreating the test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ return test_database_name
+
+ def destroy_test_db(self, old_database_name, verbosity=1):
+ """
+ Destroy a test database, prompting the user for confirmation if the
+ database already exists.
+ """
+ self.connection.close()
+ test_database_name = self.connection.settings_dict['NAME']
+ if verbosity >= 1:
+ test_db_repr = ''
+ if verbosity >= 2:
+ test_db_repr = " ('%s')" % test_database_name
+ print("Destroying test database for alias '%s'%s..." % (
+ self.connection.alias, test_db_repr))
+
+ # Temporarily use a new connection and a copy of the settings dict.
+ # This prevents the production database from being exposed to potential
+ # child threads while (or after) the test database is destroyed.
+ # Refs #10868 and #17786.
+ settings_dict = self.connection.settings_dict.copy()
+ settings_dict['NAME'] = old_database_name
+ backend = load_backend(settings_dict['ENGINE'])
+ new_connection = backend.DatabaseWrapper(
+ settings_dict,
+ alias='__destroy_test_db__',
+ allow_thread_sharing=False)
+ new_connection.creation._destroy_test_db(test_database_name, verbosity)
+
+ def _destroy_test_db(self, test_database_name, verbosity):
+ """
+ Internal implementation - remove the test db tables.
+ """
+ # Remove the test database to clean up after
+ # ourselves. Connect to the previous database (not the test database)
+ # to do so, because it's not allowed to delete a database while being
+ # connected to it.
+ cursor = self.connection.cursor()
+ # Wait to avoid "database is being accessed by other users" errors.
+ time.sleep(1)
+ cursor.execute("DROP DATABASE %s"
+ % self.connection.ops.quote_name(test_database_name))
+ self.connection.close()
+
+ def set_autocommit(self):
+ """
+        Make sure a connection is in autocommit mode. Deprecated; no longer
+        used by Django itself, but kept for compatibility with user code
+        that might use it.
+ """
+ warnings.warn(
+ "set_autocommit was moved from BaseDatabaseCreation to "
+ "BaseDatabaseWrapper.", PendingDeprecationWarning, stacklevel=2)
+ return self.connection.set_autocommit(True)
+
+ def sql_table_creation_suffix(self):
+ """
+ SQL to append to the end of the test table creation statements.
+ """
+ return ''
+
+ def test_db_signature(self):
+ """
+        Returns a tuple of values from self.connection.settings_dict (a
+        DATABASES setting value) that uniquely identifies a database,
+        according to the particularities of the RDBMS.
+ """
+ settings_dict = self.connection.settings_dict
+ return (
+ settings_dict['HOST'],
+ settings_dict['PORT'],
+ settings_dict['ENGINE'],
+ settings_dict['NAME']
+ )
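+
+    # Example: two aliases that both resolve to, say,
+    #   ('localhost', '5432', 'django.db.backends.postgresql_psycopg2', 'mydb')
+    # have equal signatures, so the test runner can create the test database
+    # only once and share it between them.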
diff --git a/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py b/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/dummy/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/dummy/base.py b/lib/python2.7/site-packages/django/db/backends/dummy/base.py
new file mode 100644
index 0000000..9a220ff
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/dummy/base.py
@@ -0,0 +1,73 @@
+"""
+Dummy database backend for Django.
+
+Django uses this if the database ENGINE setting is empty (None or empty string).
+
+Each of these API functions, except connection.close(), raises
+ImproperlyConfigured.
+"""
+
+from django.core.exceptions import ImproperlyConfigured
+from django.db.backends import *
+from django.db.backends.creation import BaseDatabaseCreation
+
+def complain(*args, **kwargs):
+ raise ImproperlyConfigured("settings.DATABASES is improperly configured. "
+ "Please supply the ENGINE value. Check "
+ "settings documentation for more details.")
+
+def ignore(*args, **kwargs):
+ pass
+
+class DatabaseError(Exception):
+ pass
+
+class IntegrityError(DatabaseError):
+ pass
+
+class DatabaseOperations(BaseDatabaseOperations):
+ quote_name = complain
+
+class DatabaseClient(BaseDatabaseClient):
+ runshell = complain
+
+class DatabaseCreation(BaseDatabaseCreation):
+ create_test_db = ignore
+ destroy_test_db = ignore
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ get_table_list = complain
+ get_table_description = complain
+ get_relations = complain
+ get_indexes = complain
+ get_key_columns = complain
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ operators = {}
+ # Override the base class implementations with null
+ # implementations. Anything that tries to actually
+ # do something raises complain; anything that tries
+ # to rollback or undo something raises ignore.
+ _cursor = complain
+ _commit = complain
+ _rollback = ignore
+ _close = ignore
+ _savepoint = ignore
+ _savepoint_commit = complain
+ _savepoint_rollback = ignore
+ _set_autocommit = complain
+ set_dirty = complain
+ set_clean = complain
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = BaseDatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def is_usable(self):
+ return True
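+
+# Note: with ENGINE left empty, any real use of the connection (e.g.
+# connection.cursor()) raises ImproperlyConfigured via complain(), while
+# connection.close() remains a harmless no-op via ignore().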
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py b/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/base.py b/lib/python2.7/site-packages/django/db/backends/mysql/base.py
new file mode 100644
index 0000000..ea04a5e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/base.py
@@ -0,0 +1,533 @@
+"""
+MySQL database backend for Django.
+
+Requires MySQLdb: http://sourceforge.net/projects/mysql-python
+"""
+from __future__ import unicode_literals
+
+import datetime
+import re
+import sys
+import warnings
+
+try:
+ import MySQLdb as Database
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading MySQLdb module: %s" % e)
+
+from django.utils.functional import cached_property
+
+# We want version (1, 2, 1, 'final', 2) or later. We can't just use
+# lexicographic ordering in this check because then (1, 2, 1, 'gamma')
+# inadvertently passes the version test.
+version = Database.version_info
+if (version < (1, 2, 1) or (version[:3] == (1, 2, 1) and
+ (len(version) < 5 or version[3] != 'final' or version[4] < 2))):
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("MySQLdb-1.2.1p2 or newer is required; you have %s" % Database.__version__)
+
+from MySQLdb.converters import conversions, Thing2Literal
+from MySQLdb.constants import FIELD_TYPE, CLIENT
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+from django.conf import settings
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.mysql.client import DatabaseClient
+from django.db.backends.mysql.creation import DatabaseCreation
+from django.db.backends.mysql.introspection import DatabaseIntrospection
+from django.db.backends.mysql.validation import DatabaseValidation
+from django.utils.encoding import force_str, force_text
+from django.utils.safestring import SafeBytes, SafeText
+from django.utils import six
+from django.utils import timezone
+
+# Raise exceptions for database warnings if DEBUG is on
+if settings.DEBUG:
+ warnings.filterwarnings("error", category=Database.Warning)
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+# It's impossible to import datetime_or_None directly from MySQLdb.times
+parse_datetime = conversions[FIELD_TYPE.DATETIME]
+
+def parse_datetime_with_timezone_support(value):
+ dt = parse_datetime(value)
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+
+def adapt_datetime_with_timezone_support(value, conv):
+ # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
+ if settings.USE_TZ:
+ if timezone.is_naive(value):
+ warnings.warn("MySQL received a naive datetime (%s)"
+ " while time zone support is active." % value,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ return Thing2Literal(value.strftime("%Y-%m-%d %H:%M:%S"), conv)
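+
+# Example: with USE_TZ = True, an aware datetime such as
+# 2013-05-01 12:00:00+02:00 is sent to MySQL as the naive UTC string
+# '2013-05-01 10:00:00', since MySQL cannot store the UTC offset itself.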
+
+# MySQLdb-1.2.1 returns TIME columns as timedelta -- they are more like
+# timedelta in terms of actual behavior as they are signed and include days --
+# and Django expects time, so we still need to override that. We also need to
+# add special handling for SafeText and SafeBytes as MySQLdb's type
+# checking is too tight to catch those (see Django ticket #6052).
+# Finally, MySQLdb always returns naive datetime objects. However, when
+# timezone support is active, Django expects timezone-aware datetime objects.
+django_conversions = conversions.copy()
+django_conversions.update({
+ FIELD_TYPE.TIME: util.typecast_time,
+ FIELD_TYPE.DECIMAL: util.typecast_decimal,
+ FIELD_TYPE.NEWDECIMAL: util.typecast_decimal,
+ FIELD_TYPE.DATETIME: parse_datetime_with_timezone_support,
+ datetime.datetime: adapt_datetime_with_timezone_support,
+})
+
+# This should match the numerical portion of the version numbers (we can treat
+# versions like 5.0.24 and 5.0.24a as the same). Based on the lists of
+# versions at http://dev.mysql.com/doc/refman/4.1/en/news.html and
+# http://dev.mysql.com/doc/refman/5.0/en/news.html.
+server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
+
+# MySQLdb-1.2.1 and newer automatically makes use of SHOW WARNINGS on
+# MySQL-4.1 and newer, so the MysqlDebugWrapper is unnecessary. Since the
+# point is to raise Warnings as exceptions, this can be done with the Python
+# warnings module, and this is set up when the connection is created, so the
+# standard util.CursorDebugWrapper can be used. Also, using sql_mode
+# TRADITIONAL will automatically cause most warnings to be treated as errors.
+
+class CursorWrapper(object):
+ """
+ A thin wrapper around MySQLdb's normal cursor class so that we can catch
+ particular exception instances and reraise them with the right types.
+
+    Implemented as a wrapper, rather than a subclass, so that we aren't tied
+    to the particular underlying representation returned by Connection.cursor().
+ """
+ codes_for_integrityerror = (1048,)
+
+ def __init__(self, cursor):
+ self.cursor = cursor
+
+ def execute(self, query, args=None):
+ try:
+ # args is None means no string interpolation
+ return self.cursor.execute(query, args)
+ except Database.OperationalError as e:
+ # Map some error codes to IntegrityError, since they seem to be
+ # misclassified and Django would prefer the more logical place.
+ if e.args[0] in self.codes_for_integrityerror:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def executemany(self, query, args):
+ try:
+ return self.cursor.executemany(query, args)
+ except Database.OperationalError as e:
+ # Map some error codes to IntegrityError, since they seem to be
+ # misclassified and Django would prefer the more logical place.
+ if e.args[0] in self.codes_for_integrityerror:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def __getattr__(self, attr):
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ else:
+ return getattr(self.cursor, attr)
+
+ def __iter__(self):
+ return iter(self.cursor)
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ empty_fetchmany_value = ()
+ update_can_self_select = False
+ allows_group_by_pk = True
+ related_fields_match_type = True
+ allow_sliced_subqueries = False
+ has_bulk_insert = True
+ has_select_for_update = True
+ has_select_for_update_nowait = False
+ supports_forward_references = False
+ supports_long_model_names = False
+ supports_microsecond_precision = False
+ supports_regex_backreferencing = False
+ supports_date_lookup_using_string = False
+ supports_timezones = False
+ requires_explicit_null_ordering_when_grouping = True
+ allows_primary_key_0 = False
+ uses_savepoints = True
+ atomic_transactions = False
+
+ def __init__(self, connection):
+ super(DatabaseFeatures, self).__init__(connection)
+
+ @cached_property
+ def _mysql_storage_engine(self):
+ "Internal method used in Django tests. Don't rely on this from your code"
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE INTROSPECT_TEST (X INT)')
+ # This command is MySQL specific; the second column
+ # will tell you the default table type of the created
+        # table. Since all of Django's test tables will have the same
+ # table type, that's enough to evaluate the feature.
+ cursor.execute("SHOW TABLE STATUS WHERE Name='INTROSPECT_TEST'")
+ result = cursor.fetchone()
+ cursor.execute('DROP TABLE INTROSPECT_TEST')
+ return result[1]
+
+ @cached_property
+ def can_introspect_foreign_keys(self):
+ "Confirm support for introspected foreign keys"
+ return self._mysql_storage_engine != 'MyISAM'
+
+ @cached_property
+ def has_zoneinfo_database(self):
+        # MySQL accepts full time zone names (e.g. Africa/Nairobi) but rejects
+        # abbreviations (e.g. EAT). When pytz isn't installed and the current
+ # time zone is LocalTimezone (the only sensible value in this
+ # context), the current time zone name will be an abbreviation. As a
+ # consequence, MySQL cannot perform time zone conversions reliably.
+ if pytz is None:
+ return False
+
+ # Test if the time zone definitions are installed.
+ cursor = self.connection.cursor()
+ cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
+ return cursor.fetchone() is not None
+
+class DatabaseOperations(BaseDatabaseOperations):
+ compiler_module = "django.db.backends.mysql.compiler"
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
+ if lookup_type == 'week_day':
+ # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
+ # Note: WEEKDAY() returns 0-6, Monday=0.
+ return "DAYOFWEEK(%s)" % field_name
+ else:
+ return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
+ format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
+ format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
+ try:
+ i = fields.index(lookup_type) + 1
+ except ValueError:
+ sql = field_name
+ else:
+ format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
+ sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
+ return sql
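+
+    # Example: date_trunc_sql('month', 'created') renders as
+    #   CAST(DATE_FORMAT(created, '%Y-%m-01 00:00:00') AS DATETIME)
+    # i.e. everything below the requested precision is pinned to a default.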
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
+ if lookup_type == 'week_day':
+ # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
+ # Note: WEEKDAY() returns 0-6, Monday=0.
+ sql = "DAYOFWEEK(%s)" % field_name
+ else:
+ sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+ return sql, params
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
+ params = [tzname]
+ else:
+ params = []
+ fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
+ format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
+ format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
+ try:
+ i = fields.index(lookup_type) + 1
+ except ValueError:
+ sql = field_name
+ else:
+ format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
+ sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
+ return sql, params
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ return "(%s %s INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND)" % (sql, connector,
+ timedelta.days, timedelta.seconds, timedelta.microseconds)
+
+ def drop_foreignkey_sql(self):
+ return "DROP FOREIGN KEY"
+
+ def force_no_ordering(self):
+ """
+ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
+ columns. If no ordering would otherwise be applied, we don't want any
+ implicit sorting going on.
+ """
+ return ["NULL"]
+
+ def fulltext_search_sql(self, field_name):
+ return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
+
+ def last_executed_query(self, cursor, sql, params):
+ # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
+ # attribute where the exact query sent to the database is saved.
+ # See MySQLdb/cursors.py in the source distribution.
+ return force_text(getattr(cursor, '_last_executed', None), errors='replace')
+
+ def no_limit_value(self):
+ # 2**64 - 1, as recommended by the MySQL documentation
+ return 18446744073709551615
+
+ def quote_name(self, name):
+ if name.startswith("`") and name.endswith("`"):
+ return name # Quoting once is enough.
+ return "`%s`" % name
+
+ def random_function_sql(self):
+ return 'RAND()'
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # NB: The generated SQL below is specific to MySQL
+ # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
+ # to clear all tables of all data
+ if tables:
+ sql = ['SET FOREIGN_KEY_CHECKS = 0;']
+ for table in tables:
+ sql.append('%s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ style.SQL_FIELD(self.quote_name(table)),
+ ))
+ sql.append('SET FOREIGN_KEY_CHECKS = 1;')
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
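+
+    # Example (hypothetical tables): sql_flush(style, ['a', 'b'], []) produces
+    #   SET FOREIGN_KEY_CHECKS = 0;
+    #   TRUNCATE `a`;
+    #   TRUNCATE `b`;
+    #   SET FOREIGN_KEY_CHECKS = 1;
+    # so rows can be removed without tripping foreign key constraints.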
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ # Truncate already resets the AUTO_INCREMENT field from
+ # MySQL version 5.0.13 onwards. Refs #16961.
+ if self.connection.mysql_version < (5, 0, 13):
+ return ["%s %s %s %s %s;" % \
+ (style.SQL_KEYWORD('ALTER'),
+ style.SQL_KEYWORD('TABLE'),
+ style.SQL_TABLE(self.quote_name(sequence['table'])),
+ style.SQL_KEYWORD('AUTO_INCREMENT'),
+ style.SQL_FIELD('= 1'),
+ ) for sequence in sequences]
+ else:
+ return []
+
+ def validate_autopk_value(self, value):
+ # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
+ if value == 0:
+ raise ValueError('The database backend does not accept 0 as a '
+ 'value for AutoField.')
+ return value
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # MySQL doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ # MySQL doesn't support microseconds
+ return six.text_type(value.replace(microsecond=0))
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ # MySQL doesn't support tz-aware times
+ if timezone.is_aware(value):
+ raise ValueError("MySQL backend does not support timezone-aware times.")
+
+ # MySQL doesn't support microseconds
+ return six.text_type(value.replace(microsecond=0))
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ # Again, no microseconds
+ first, second = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
+ return [first.replace(microsecond=0), second.replace(microsecond=0)]
+
+ def max_name_length(self):
+ return 64
+
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
+ return "VALUES " + ", ".join([items_sql] * num_values)
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'mysql'
+ operators = {
+ 'exact': '= %s',
+ 'iexact': 'LIKE %s',
+ 'contains': 'LIKE BINARY %s',
+ 'icontains': 'LIKE %s',
+ 'regex': 'REGEXP BINARY %s',
+ 'iregex': 'REGEXP %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': 'LIKE BINARY %s',
+ 'endswith': 'LIKE BINARY %s',
+ 'istartswith': 'LIKE %s',
+ 'iendswith': 'LIKE %s',
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = DatabaseValidation(self)
+
+ def get_connection_params(self):
+ kwargs = {
+ 'conv': django_conversions,
+ 'charset': 'utf8',
+ }
+ if six.PY2:
+ kwargs['use_unicode'] = True
+ settings_dict = self.settings_dict
+ if settings_dict['USER']:
+ kwargs['user'] = settings_dict['USER']
+ if settings_dict['NAME']:
+ kwargs['db'] = settings_dict['NAME']
+ if settings_dict['PASSWORD']:
+ kwargs['passwd'] = force_str(settings_dict['PASSWORD'])
+ if settings_dict['HOST'].startswith('/'):
+ kwargs['unix_socket'] = settings_dict['HOST']
+ elif settings_dict['HOST']:
+ kwargs['host'] = settings_dict['HOST']
+ if settings_dict['PORT']:
+ kwargs['port'] = int(settings_dict['PORT'])
+ # We need the number of potentially affected rows after an
+ # "UPDATE", not the number of changed rows.
+ kwargs['client_flag'] = CLIENT.FOUND_ROWS
+ kwargs.update(settings_dict['OPTIONS'])
+ return kwargs
+
+ def get_new_connection(self, conn_params):
+ conn = Database.connect(**conn_params)
+ conn.encoders[SafeText] = conn.encoders[six.text_type]
+ conn.encoders[SafeBytes] = conn.encoders[bytes]
+ return conn
+
+ def init_connection_state(self):
+ cursor = self.connection.cursor()
+        # SQL_AUTO_IS_NULL in MySQL controls whether an AUTO_INCREMENT column
+        # on a recently-inserted row is matched when the field is tested for
+ # NULL. Disabling this value brings this aspect of MySQL in line with
+ # SQL standards.
+ cursor.execute('SET SQL_AUTO_IS_NULL = 0')
+ cursor.close()
+
+ def create_cursor(self):
+ cursor = self.connection.cursor()
+ return CursorWrapper(cursor)
+
+ def _rollback(self):
+ try:
+ BaseDatabaseWrapper._rollback(self)
+ except Database.NotSupportedError:
+ pass
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ self.connection.autocommit(autocommit)
+
+ def disable_constraint_checking(self):
+ """
+ Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
+ to indicate constraint checks need to be re-enabled.
+ """
+ self.cursor().execute('SET foreign_key_checks=0')
+ return True
+
+ def enable_constraint_checking(self):
+ """
+ Re-enable foreign key checks after they have been disabled.
+ """
+ # Override needs_rollback in case constraint_checks_disabled is
+ # nested inside transaction.atomic.
+ self.needs_rollback, needs_rollback = False, self.needs_rollback
+ try:
+ self.cursor().execute('SET foreign_key_checks=1')
+ finally:
+ self.needs_rollback = needs_rollback
+
+ def check_constraints(self, table_names=None):
+ """
+ Checks each table name in `table_names` for rows with invalid foreign key references. This method is
+ intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
+ determine if rows with invalid references were entered while constraint checks were off.
+
+ Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
+ detailed information about the invalid reference in the error message.
+
+        Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
+        ALL IMMEDIATE").
+ """
+ cursor = self.cursor()
+ if table_names is None:
+ table_names = self.introspection.table_names(cursor)
+ for table_name in table_names:
+ primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
+ if not primary_key_column_name:
+ continue
+ key_columns = self.introspection.get_key_columns(cursor, table_name)
+ for column_name, referenced_table_name, referenced_column_name in key_columns:
+ cursor.execute("""
+ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
+ LEFT JOIN `%s` as REFERRED
+ ON (REFERRING.`%s` = REFERRED.`%s`)
+ WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
+ % (primary_key_column_name, column_name, table_name, referenced_table_name,
+ column_name, referenced_column_name, column_name, referenced_column_name))
+ for bad_row in cursor.fetchall():
+ raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
+ "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
+ % (table_name, bad_row[0],
+ table_name, column_name, bad_row[1],
+ referenced_table_name, referenced_column_name))
+
+ def is_usable(self):
+ try:
+ self.connection.ping()
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def mysql_version(self):
+ with self.temporary_connection():
+ server_info = self.connection.get_server_info()
+ match = server_version_re.match(server_info)
+ if not match:
+ raise Exception('Unable to determine MySQL version from version string %r' % server_info)
+ return tuple([int(x) for x in match.groups()])
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/client.py b/lib/python2.7/site-packages/django/db/backends/mysql/client.py
new file mode 100644
index 0000000..1cf8cee
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/client.py
@@ -0,0 +1,40 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'mysql'
+
+ def runshell(self):
+ settings_dict = self.connection.settings_dict
+ args = [self.executable_name]
+ db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
+ user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
+ passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
+ host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
+ port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
+ defaults_file = settings_dict['OPTIONS'].get('read_default_file')
+        # There seems to be no good way to set sql_mode from the CLI.
+
+ if defaults_file:
+ args += ["--defaults-file=%s" % defaults_file]
+ if user:
+ args += ["--user=%s" % user]
+ if passwd:
+ args += ["--password=%s" % passwd]
+ if host:
+ if '/' in host:
+ args += ["--socket=%s" % host]
+ else:
+ args += ["--host=%s" % host]
+ if port:
+ args += ["--port=%s" % port]
+ if db:
+ args += [db]
+
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
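+# Example (hypothetical settings): NAME='mydb', USER='web' and
+# HOST='db.example.com' end up exec'ing roughly
+#   mysql --user=web --host=db.example.com mydb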
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py b/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py
new file mode 100644
index 0000000..d3439bf
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/compiler.py
@@ -0,0 +1,37 @@
+from django.db.models.sql import compiler
+from django.utils.six.moves import zip_longest
+
+
+class SQLCompiler(compiler.SQLCompiler):
+ def resolve_columns(self, row, fields=()):
+ values = []
+ index_extra_select = len(self.query.extra_select)
+ for value, field in zip_longest(row[index_extra_select:], fields):
+ if (field and field.get_internal_type() in ("BooleanField", "NullBooleanField") and
+ value in (0, 1)):
+ value = bool(value)
+ values.append(value)
+ return row[:index_extra_select] + tuple(values)
+
+ def as_subquery_condition(self, alias, columns, qn):
+ qn2 = self.connection.ops.quote_name
+ sql, params = self.as_sql()
+ return '(%s) IN (%s)' % (', '.join(['%s.%s' % (qn(alias), qn2(column)) for column in columns]), sql), params
+
+class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
+ pass
+
+class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
+ pass
+
+class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
+ pass
+
+class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
+ pass
+
+class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
+ pass
+
+class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):
+ pass
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/creation.py b/lib/python2.7/site-packages/django/db/backends/mysql/creation.py
new file mode 100644
index 0000000..3a57c29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/creation.py
@@ -0,0 +1,70 @@
+from django.db.backends.creation import BaseDatabaseCreation
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated MySQL column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ data_types = {
+ 'AutoField': 'integer AUTO_INCREMENT',
+ 'BinaryField': 'longblob',
+ 'BooleanField': 'bool',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'datetime',
+ 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'double precision',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'char(15)',
+ 'GenericIPAddressField': 'char(39)',
+ 'NullBooleanField': 'bool',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer UNSIGNED',
+ 'PositiveSmallIntegerField': 'smallint UNSIGNED',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'longtext',
+ 'TimeField': 'time',
+ }
+
+ def sql_table_creation_suffix(self):
+ suffix = []
+ if self.connection.settings_dict['TEST_CHARSET']:
+ suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
+ if self.connection.settings_dict['TEST_COLLATION']:
+ suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
+ return ' '.join(suffix)
+
+ def sql_for_inline_foreign_key_references(self, model, field, known_models, style):
+ "All inline references are pending under MySQL"
+ return [], True
+
+ def sql_destroy_indexes_for_fields(self, model, fields, style):
+ if len(fields) == 1 and fields[0].db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
+ elif model._meta.db_tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
+ else:
+ tablespace_sql = ""
+ if tablespace_sql:
+ tablespace_sql = " " + tablespace_sql
+
+ field_names = []
+ qn = self.connection.ops.quote_name
+ for f in fields:
+ field_names.append(style.SQL_FIELD(qn(f.column)))
+
+ index_name = "%s_%s" % (model._meta.db_table, self._digest([f.name for f in fields]))
+
+ from ..util import truncate_name
+
+ return [
+ style.SQL_KEYWORD("DROP INDEX") + " " +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + " " +
+ style.SQL_KEYWORD("ON") + " " +
+ style.SQL_TABLE(qn(model._meta.db_table)) + ";",
+ ]
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py b/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py
new file mode 100644
index 0000000..548877e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/introspection.py
@@ -0,0 +1,119 @@
+import re
+from .base import FIELD_TYPE
+
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+
+
+foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ data_types_reverse = {
+ FIELD_TYPE.BLOB: 'TextField',
+ FIELD_TYPE.CHAR: 'CharField',
+ FIELD_TYPE.DECIMAL: 'DecimalField',
+ FIELD_TYPE.NEWDECIMAL: 'DecimalField',
+ FIELD_TYPE.DATE: 'DateField',
+ FIELD_TYPE.DATETIME: 'DateTimeField',
+ FIELD_TYPE.DOUBLE: 'FloatField',
+ FIELD_TYPE.FLOAT: 'FloatField',
+ FIELD_TYPE.INT24: 'IntegerField',
+ FIELD_TYPE.LONG: 'IntegerField',
+ FIELD_TYPE.LONGLONG: 'BigIntegerField',
+ FIELD_TYPE.SHORT: 'IntegerField',
+ FIELD_TYPE.STRING: 'CharField',
+ FIELD_TYPE.TIME: 'TimeField',
+ FIELD_TYPE.TIMESTAMP: 'DateTimeField',
+ FIELD_TYPE.TINY: 'IntegerField',
+ FIELD_TYPE.TINY_BLOB: 'TextField',
+ FIELD_TYPE.MEDIUM_BLOB: 'TextField',
+ FIELD_TYPE.LONG_BLOB: 'TextField',
+ FIELD_TYPE.VAR_STRING: 'CharField',
+ }
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("SHOW TABLES")
+ return [row[0] for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ """
+        Returns a description of the table, with the DB-API
+        cursor.description interface.
+ """
+ # varchar length returned by cursor.description is an internal length,
+ # not visible length (#5725), use information_schema database to fix this
+ cursor.execute("""
+ SELECT column_name, character_maximum_length FROM information_schema.columns
+ WHERE table_name = %s AND table_schema = DATABASE()
+ AND character_maximum_length IS NOT NULL""", [table_name])
+ length_map = dict(cursor.fetchall())
+
+ # Also getting precision and scale from information_schema (see #5014)
+ cursor.execute("""
+ SELECT column_name, numeric_precision, numeric_scale FROM information_schema.columns
+ WHERE table_name = %s AND table_schema = DATABASE()
+ AND data_type='decimal'""", [table_name])
+ numeric_map = dict([(line[0], tuple([int(n) for n in line[1:]])) for line in cursor.fetchall()])
+
+ cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
+ return [FieldInfo(*((force_text(line[0]),)
+ + line[1:3]
+ + (length_map.get(line[0], line[3]),)
+ + numeric_map.get(line[0], line[4:6])
+ + (line[6],)))
+ for line in cursor.description]
+
+ def _name_to_index(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_name: field_index} for the given table.
+ Indexes are 0-based.
+ """
+ return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ my_field_dict = self._name_to_index(cursor, table_name)
+ constraints = self.get_key_columns(cursor, table_name)
+ relations = {}
+ for my_fieldname, other_table, other_field in constraints:
+ other_field_index = self._name_to_index(cursor, other_table)[other_field]
+ my_field_index = my_field_dict[my_fieldname]
+ relations[my_field_index] = (other_field_index, other_table)
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
+ key columns in given table.
+ """
+ key_columns = []
+ cursor.execute("""
+ SELECT column_name, referenced_table_name, referenced_column_name
+ FROM information_schema.key_column_usage
+ WHERE table_name = %s
+ AND table_schema = DATABASE()
+ AND referenced_table_name IS NOT NULL
+ AND referenced_column_name IS NOT NULL""", [table_name])
+ key_columns.extend(cursor.fetchall())
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
+        # Do a two-pass search for indexes: on the first pass, check which
+        # indexes are multicolumn; on the second, check which single-column
+        # indexes are present.
+ rows = list(cursor.fetchall())
+ multicol_indexes = set()
+ for row in rows:
+ if row[3] > 1:
+ multicol_indexes.add(row[2])
+ indexes = {}
+ for row in rows:
+ if row[2] in multicol_indexes:
+ continue
+ indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
+ return indexes
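+
+    # Example shape: for a table with primary key `id` and a unique index on
+    # `email`, get_indexes() returns roughly
+    #   {'id': {'primary_key': True, 'unique': True},
+    #    'email': {'primary_key': False, 'unique': True}}
+    # (multicolumn indexes are skipped on purpose).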
+
diff --git a/lib/python2.7/site-packages/django/db/backends/mysql/validation.py b/lib/python2.7/site-packages/django/db/backends/mysql/validation.py
new file mode 100644
index 0000000..2ce957c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/mysql/validation.py
@@ -0,0 +1,16 @@
+from django.db.backends import BaseDatabaseValidation
+
+class DatabaseValidation(BaseDatabaseValidation):
+ def validate_field(self, errors, opts, f):
+ """
+ MySQL has the following field length restriction:
+ No character (varchar) fields can have a length exceeding 255
+ characters if they have a unique index on them.
+ """
+ from django.db import models
+ varchar_fields = (models.CharField, models.CommaSeparatedIntegerField,
+ models.SlugField)
+ if (isinstance(f, varchar_fields) and f.unique
+ and (f.max_length is None or int(f.max_length) > 255)):
+ msg = '"%(name)s": %(cls)s cannot have a "max_length" greater than 255 when using "unique=True".'
+ errors.add(opts, msg % {'name': f.name, 'cls': f.__class__.__name__})
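+
+    # Example: a unique CharField named 'slug' with max_length=300 fails
+    # validation with the message
+    #   "slug": CharField cannot have a "max_length" greater than 255 when
+    #   using "unique=True".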
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py b/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/base.py b/lib/python2.7/site-packages/django/db/backends/oracle/base.py
new file mode 100644
index 0000000..11ab574
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/base.py
@@ -0,0 +1,961 @@
+"""
+Oracle database backend for Django.
+
+Requires cx_Oracle: http://cx-oracle.sourceforge.net/
+"""
+from __future__ import unicode_literals
+
+import decimal
+import re
+import sys
+import warnings
+
+def _setup_environment(environ):
+ import platform
+ # Cygwin requires some special voodoo to set the environment variables
+ # properly so that Oracle will see them.
+ if platform.system().upper().startswith('CYGWIN'):
+ try:
+ import ctypes
+ except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading ctypes: %s; "
+ "the Oracle backend requires ctypes to "
+ "operate correctly under Cygwin." % e)
+ kernel32 = ctypes.CDLL('kernel32')
+ for name, value in environ:
+ kernel32.SetEnvironmentVariableA(name, value)
+ else:
+ import os
+ os.environ.update(environ)
+
+_setup_environment([
+ # Oracle takes client-side character set encoding from the environment.
+ ('NLS_LANG', '.UTF8'),
+ # This prevents unicode from getting mangled by getting encoded into the
+ # potentially non-unicode database character set.
+ ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
+])
+
+
+try:
+ import cx_Oracle as Database
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.oracle.client import DatabaseClient
+from django.db.backends.oracle.creation import DatabaseCreation
+from django.db.backends.oracle.introspection import DatabaseIntrospection
+from django.utils.encoding import force_bytes, force_text
+
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
+# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
+if int(Database.version.split('.', 1)[0]) >= 5 and \
+ (int(Database.version.split('.', 2)[1]) >= 1 or
+ not hasattr(Database, 'UNICODE')):
+ convert_unicode = force_text
+else:
+ convert_unicode = force_bytes
+
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ empty_fetchmany_value = ()
+ needs_datetime_string_cast = False
+ interprets_empty_strings_as_nulls = True
+ uses_savepoints = True
+ has_select_for_update = True
+ has_select_for_update_nowait = True
+ can_return_id_from_insert = True
+ allow_sliced_subqueries = False
+ supports_subqueries_in_group_by = False
+ supports_transactions = True
+ supports_timezones = False
+ has_zoneinfo_database = pytz is not None
+ supports_bitwise_or = False
+ can_defer_constraint_checks = True
+ ignores_nulls_in_unique_constraints = False
+ has_bulk_insert = True
+ supports_tablespaces = True
+ supports_sequence_reset = False
+ atomic_transactions = False
+
+class DatabaseOperations(BaseDatabaseOperations):
+ compiler_module = "django.db.backends.oracle.compiler"
+
+ def autoinc_sql(self, table, column):
+ # To simulate auto-incrementing primary keys in Oracle, we have to
+ # create a sequence and a trigger.
+ sq_name = self._get_sequence_name(table)
+ tr_name = self._get_trigger_name(table)
+ tbl_name = self.quote_name(table)
+ col_name = self.quote_name(column)
+ sequence_sql = """
+DECLARE
+ i INTEGER;
+BEGIN
+ SELECT COUNT(*) INTO i FROM USER_CATALOG
+ WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
+ IF i = 0 THEN
+ EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
+ END IF;
+END;
+/""" % locals()
+ trigger_sql = """
+CREATE OR REPLACE TRIGGER "%(tr_name)s"
+BEFORE INSERT ON %(tbl_name)s
+FOR EACH ROW
+WHEN (new.%(col_name)s IS NULL)
+ BEGIN
+ SELECT "%(sq_name)s".nextval
+ INTO :new.%(col_name)s FROM dual;
+ END;
+/""" % locals()
+ return sequence_sql, trigger_sql
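+
+    # For illustration (hypothetical table BLOG_POST, with derived names such
+    # as "BLOG_POST_SQ"/"BLOG_POST_TR"): the pair above creates the sequence
+    # only if it does not yet exist, and (re)creates a BEFORE INSERT trigger
+    # that fills the primary key from the sequence when the inserted value
+    # is NULL.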
+
+ def cache_key_culling_sql(self):
+ return """
+ SELECT cache_key
+ FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
+ WHERE rank = %%s + 1
+ """
+
+ def date_extract_sql(self, lookup_type, field_name):
+ if lookup_type == 'week_day':
+ # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
+ return "TO_CHAR(%s, 'D')" % field_name
+ else:
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
+ return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements the interval functionality for expressions
+ format for Oracle:
+ (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
+ """
+ minutes, seconds = divmod(timedelta.seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+ days = str(timedelta.days)
+ day_precision = len(days)
+ fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
+ return fmt % (sql, connector, days, hours, minutes, seconds,
+ timedelta.microseconds, day_precision)
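+
+    # Example: timedelta(days=3, seconds=200) with connector '+' renders as
+    #   (... + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
+    # matching the format shown in the docstring above.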
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
+ if lookup_type in ('year', 'month'):
+ return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
+ else:
+ return "TRUNC(%s)" % field_name
+
+ # Oracle crashes with "ORA-03113: end-of-file on communication channel"
+ # if the time zone name is passed in parameter. Use interpolation instead.
+ # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
+ # This regexp matches all time zone names from the zoneinfo database.
+ _tzname_re = re.compile(r'^[\w/:+-]+$')
+
+ def _convert_field_to_tz(self, field_name, tzname):
+ if not self._tzname_re.match(tzname):
+ raise ValueError("Invalid time zone name: %s" % tzname)
+ # Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
+ result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
+        # Extracting from a TIMESTAMP WITH TIME ZONE ignores the time zone.
+ # Convert to a DATETIME, which is called DATE by Oracle. There's no
+ # built-in function to do that; the easiest is to go through a string.
+ result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
+ result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
+ # Re-convert to a TIMESTAMP because EXTRACT only handles the date part
+ # on DATE values, even though they actually store the time part.
+ return "CAST(%s AS TIMESTAMP)" % result
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = self._convert_field_to_tz(field_name, tzname)
+ if lookup_type == 'week_day':
+ # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
+ sql = "TO_CHAR(%s, 'D')" % field_name
+ else:
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
+ sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
+ return sql, []
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = self._convert_field_to_tz(field_name, tzname)
+ # http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
+ if lookup_type in ('year', 'month'):
+ sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
+ elif lookup_type == 'day':
+ sql = "TRUNC(%s)" % field_name
+ elif lookup_type == 'hour':
+ sql = "TRUNC(%s, 'HH24')" % field_name
+ elif lookup_type == 'minute':
+ sql = "TRUNC(%s, 'MI')" % field_name
+ else:
+ sql = field_name # Cast to DATE removes sub-second precision.
+ return sql, []
+
+ def convert_values(self, value, field):
+ if isinstance(value, Database.LOB):
+ value = value.read()
+ if field and field.get_internal_type() == 'TextField':
+ value = force_text(value)
+
+ # Oracle stores empty strings as null. We need to undo this in
+ # order to adhere to the Django convention of using the empty
+ # string instead of null, but only if the field accepts the
+ # empty string.
+ if value is None and field and field.empty_strings_allowed:
+ value = ''
+ # Convert 1 or 0 to True or False
+ elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
+ value = bool(value)
+ # Force floats to the correct type
+ elif value is not None and field and field.get_internal_type() == 'FloatField':
+ value = float(value)
+ # Convert floats to decimals
+ elif value is not None and field and field.get_internal_type() == 'DecimalField':
+ value = util.typecast_decimal(field.format_number(value))
+ # cx_Oracle always returns datetime.datetime objects for
+ # DATE and TIMESTAMP columns, but Django wants to see a
+ # python datetime.date, .time, or .datetime. We use the type
+ # of the Field to determine which to cast to, but it's not
+ # always available.
+ # As a workaround, we cast to date if all the time-related
+ # values are 0, or to time if the date is 1/1/1900.
+ # This could be cleaned a bit by adding a method to the Field
+ # classes to normalize values from the database (the to_python
+ # method is used for validation and isn't what we want here).
+ elif isinstance(value, Database.Timestamp):
+ if field and field.get_internal_type() == 'DateTimeField':
+ pass
+ elif field and field.get_internal_type() == 'DateField':
+ value = value.date()
+ elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
+ value = value.time()
+ elif value.hour == value.minute == value.second == value.microsecond == 0:
+ value = value.date()
+ return value
+
+ def deferrable_sql(self):
+ return " DEFERRABLE INITIALLY DEFERRED"
+
+ def drop_sequence_sql(self, table):
+ return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
+
+ def fetch_returned_insert_id(self, cursor):
+ return int(cursor._insert_id_var.getvalue())
+
+ def field_cast_sql(self, db_type, internal_type):
+ if db_type and db_type.endswith('LOB'):
+ return "DBMS_LOB.SUBSTR(%s)"
+ else:
+ return "%s"
+
+ def last_executed_query(self, cursor, sql, params):
+ # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
+ # The DB API definition does not define this attribute.
+ statement = cursor.statement
+ if statement and six.PY2 and not isinstance(statement, unicode):
+ statement = statement.decode('utf-8')
+        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, cx_Oracle's
+ # `statement` doesn't contain the query parameters. refs #20010.
+ return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
+
+ def last_insert_id(self, cursor, table_name, pk_name):
+ sq_name = self._get_sequence_name(table_name)
+ cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
+ return cursor.fetchone()[0]
+
+ def lookup_cast(self, lookup_type):
+ if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
+ return "UPPER(%s)"
+ return "%s"
+
+ def max_in_list_size(self):
+ return 1000
+
+ def max_name_length(self):
+ return 30
+
+ def prep_for_iexact_query(self, x):
+ return x
+
+ def process_clob(self, value):
+ if value is None:
+ return ''
+ return force_text(value.read())
+
+ def quote_name(self, name):
+ # SQL92 requires delimited (quoted) names to be case-sensitive. When
+ # not quoted, Oracle has case-insensitive behavior for identifiers, but
+ # always defaults to uppercase.
+ # We simplify things by making Oracle identifiers always uppercase.
+ if not name.startswith('"') and not name.endswith('"'):
+ name = '"%s"' % util.truncate_name(name.upper(),
+ self.max_name_length())
+ # Oracle puts the query text into a (query % args) construct, so % signs
+ # in names need to be escaped. The '%%' will be collapsed back to '%' at
+ # that stage so we aren't really making the name longer here.
+ name = name.replace('%','%%')
+ return name.upper()
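+ # For example, quote_name('accounts_user') returns '"ACCOUNTS_USER"',
+ # and quote_name('100%_table') returns '"100%%_TABLE"'.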
+
+ def random_function_sql(self):
+ return "DBMS_RANDOM.RANDOM"
+
+ def regex_lookup_9(self, lookup_type):
+ raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
+
+ def regex_lookup_10(self, lookup_type):
+ if lookup_type == 'regex':
+ match_option = "'c'"
+ else:
+ match_option = "'i'"
+ return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
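+ # For example, an 'iregex' lookup renders as REGEXP_LIKE(%s, %s, 'i')
+ # before the placeholders are rebound by the cursor.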
+
+ def regex_lookup(self, lookup_type):
+ # If regex_lookup is called before it's been initialized, create a
+ # cursor to initialize it, then delegate to whichever version-specific
+ # implementation that initialization installs.
+ self.connection.cursor()
+ return self.connection.ops.regex_lookup(lookup_type)
+
+ def return_insert_id(self):
+ return "RETURNING %s INTO %%s", (InsertIdVar(),)
+
+ def savepoint_create_sql(self, sid):
+ return convert_unicode("SAVEPOINT " + self.quote_name(sid))
+
+ def savepoint_rollback_sql(self, sid):
+ return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
+ # 'TRUNCATE z;'... style SQL statements
+ if tables:
+ # Oracle does support TRUNCATE, but it seems to get us into
+ # FK referential trouble, whereas DELETE FROM table works.
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('DELETE'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_FIELD(self.quote_name(table))
+ ) for table in tables]
+ # Since we've just deleted all the rows, the sequence reset SQL
+ # below realigns each sequence with its now-empty table.
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
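+ # For example, sql_flush(style, ['auth_user', 'auth_group'], []) returns
+ # ['DELETE FROM "AUTH_USER";', 'DELETE FROM "AUTH_GROUP";'].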
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ sql = []
+ for sequence_info in sequences:
+ sequence_name = self._get_sequence_name(sequence_info['table'])
+ table_name = self.quote_name(sequence_info['table'])
+ column_name = self.quote_name(sequence_info['column'] or 'id')
+ query = _get_sequence_reset_sql() % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name}
+ sql.append(query)
+ return sql
+
+ def sequence_reset_sql(self, style, model_list):
+ from django.db import models
+ output = []
+ query = _get_sequence_reset_sql()
+ for model in model_list:
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ table_name = self.quote_name(model._meta.db_table)
+ sequence_name = self._get_sequence_name(model._meta.db_table)
+ column_name = self.quote_name(f.column)
+ output.append(query % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name})
+ # Only one AutoField is allowed per model, so don't
+ # continue to loop
+ break
+ for f in model._meta.many_to_many:
+ if not f.rel.through:
+ table_name = self.quote_name(f.m2m_db_table())
+ sequence_name = self._get_sequence_name(f.m2m_db_table())
+ column_name = self.quote_name('id')
+ output.append(query % {'sequence': sequence_name,
+ 'table': table_name,
+ 'column': column_name})
+ return output
+
+ def start_transaction_sql(self):
+ return ''
+
+ def tablespace_sql(self, tablespace, inline=False):
+ if inline:
+ return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
+ else:
+ return "TABLESPACE %s" % self.quote_name(tablespace)
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # Oracle doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ if isinstance(value, six.string_types):
+ return datetime.datetime.strptime(value, '%H:%M:%S')
+
+ # Oracle doesn't support tz-aware times
+ if timezone.is_aware(value):
+ raise ValueError("Oracle backend does not support timezone-aware times.")
+
+ return datetime.datetime(1900, 1, 1, value.hour, value.minute,
+ value.second, value.microsecond)
+
+ def year_lookup_bounds_for_date_field(self, value):
+ first = '%s-01-01'
+ second = '%s-12-31'
+ return [first % value, second % value]
+
+ def year_lookup_bounds_for_datetime_field(self, value):
+ # The default implementation uses datetime objects for the bounds.
+ # This must be overridden here, to use a formatted date (string) as
+ # 'second' instead -- cx_Oracle chops the fraction-of-second part
+ # off of datetime objects, leaving almost an entire second out of
+ # the year under the default implementation.
+ bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
+ if settings.USE_TZ:
+ bounds = [b.astimezone(timezone.utc).replace(tzinfo=None) for b in bounds]
+ return [b.isoformat(b' ') for b in bounds]
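+ # For example, the year 2013 yields the pair
+ # ['2013-01-01 00:00:00', '2013-12-31 23:59:59.999999'] (shifted to UTC
+ # first when USE_TZ is on), preserving the last fraction of a second.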
+
+ def combine_expression(self, connector, sub_expressions):
+ "Oracle requires special cases for %% and & operators in query expressions"
+ if connector == '%%':
+ return 'MOD(%s)' % ','.join(sub_expressions)
+ elif connector == '&':
+ return 'BITAND(%s)' % ','.join(sub_expressions)
+ elif connector == '|':
+ raise NotImplementedError("Bit-wise or is not supported in Oracle.")
+ return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
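+ # For example, combine_expression('%%', ['3', '2']) returns 'MOD(3,2)'
+ # and combine_expression('&', ['a', 'b']) returns 'BITAND(a,b)'.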
+
+ def _get_sequence_name(self, table):
+ name_length = self.max_name_length() - 3
+ return '%s_SQ' % util.truncate_name(table, name_length).upper()
+
+ def _get_trigger_name(self, table):
+ name_length = self.max_name_length() - 3
+ return '%s_TR' % util.truncate_name(table, name_length).upper()
+
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
+ return " UNION ALL ".join([items_sql] * num_values)
+
+
+class _UninitializedOperatorsDescriptor(object):
+
+ def __get__(self, instance, owner):
+ # If connection.operators is looked up before a connection has been
+ # created, transparently initialize connection.operators to avert an
+ # AttributeError.
+ if instance is None:
+ raise AttributeError("operators not available as class attribute")
+ # Creating a cursor will initialize the operators.
+ instance.cursor().close()
+ return instance.__dict__['operators']
+
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'oracle'
+ operators = _UninitializedOperatorsDescriptor()
+
+ _standard_operators = {
+ 'exact': '= %s',
+ 'iexact': '= UPPER(%s)',
+ 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
+ }
+
+ _likec_operators = _standard_operators.copy()
+ _likec_operators.update({
+ 'contains': "LIKEC %s ESCAPE '\\'",
+ 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
+ 'startswith': "LIKEC %s ESCAPE '\\'",
+ 'endswith': "LIKEC %s ESCAPE '\\'",
+ 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
+ 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
+ })
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
+ self.features.can_return_id_from_insert = use_returning_into
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def _connect_string(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['HOST'].strip():
+ settings_dict['HOST'] = 'localhost'
+ if settings_dict['PORT'].strip():
+ dsn = Database.makedsn(settings_dict['HOST'],
+ int(settings_dict['PORT']),
+ settings_dict['NAME'])
+ else:
+ dsn = settings_dict['NAME']
+ return "%s/%s@%s" % (settings_dict['USER'],
+ settings_dict['PASSWORD'], dsn)
+
+ def get_connection_params(self):
+ conn_params = self.settings_dict['OPTIONS'].copy()
+ if 'use_returning_into' in conn_params:
+ del conn_params['use_returning_into']
+ return conn_params
+
+ def get_new_connection(self, conn_params):
+ conn_string = convert_unicode(self._connect_string())
+ return Database.connect(conn_string, **conn_params)
+
+ def init_connection_state(self):
+ cursor = self.create_cursor()
+ # Set the territory first. The territory overrides NLS_DATE_FORMAT
+ # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
+ # these are set in a single statement it isn't clear what is supposed
+ # to happen.
+ cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
+ # Set Oracle date to ANSI date format. This only needs to execute
+ # once when we create a new connection. We also set the Territory
+ # to 'AMERICA' which forces Sunday to evaluate to a '1' in
+ # TO_CHAR().
+ cursor.execute(
+ "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
+ " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
+ cursor.close()
+ if 'operators' not in self.__dict__:
+ # Ticket #14149: Check whether our LIKE implementation will
+ # work for this connection or we need to fall back on LIKEC.
+ # This check is performed only once per DatabaseWrapper
+ # instance per thread, since subsequent connections will use
+ # the same settings.
+ cursor = self.create_cursor()
+ try:
+ cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
+ % self._standard_operators['contains'],
+ ['X'])
+ except DatabaseError:
+ self.operators = self._likec_operators
+ else:
+ self.operators = self._standard_operators
+ cursor.close()
+
+ # There's no way for the DatabaseOperations class to know the
+ # currently active Oracle version, so we do some setup here.
+ # TODO: Multi-db support will need a better solution (a way to
+ # communicate the current version).
+ if self.oracle_version is not None and self.oracle_version <= 9:
+ self.ops.regex_lookup = self.ops.regex_lookup_9
+ else:
+ self.ops.regex_lookup = self.ops.regex_lookup_10
+
+ try:
+ self.connection.stmtcachesize = 20
+ except AttributeError:
+ # Django docs specify cx_Oracle version 4.3.1 or higher, but
+ # stmtcachesize is available only in 4.3.2 and up.
+ pass
+
+ def create_cursor(self):
+ return FormatStylePlaceholderCursor(self.connection)
+
+ def _commit(self):
+ if self.connection is not None:
+ try:
+ return self.connection.commit()
+ except Database.DatabaseError as e:
+ # cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
+ # with the following attributes and values:
+ # code = 2091
+ # message = 'ORA-02091: transaction rolled back
+ # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
+ # _C00102056) violated - parent key not found'
+ # We convert that particular case to our IntegrityError exception
+ x = e.args[0]
+ if hasattr(x, 'code') and hasattr(x, 'message') \
+ and x.code == 2091 and 'ORA-02291' in x.message:
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ # Oracle doesn't support savepoint commits. Ignore them.
+ def _savepoint_commit(self, sid):
+ pass
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ self.connection.autocommit = autocommit
+
+ def check_constraints(self, table_names=None):
+ """
+ To check constraints, we set them to immediate. Then, when we're
+ done, we must ensure they are returned to deferred.
+ """
+ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
+ self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
+
+ def is_usable(self):
+ try:
+ if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
+ self.connection.ping()
+ else:
+ # Use a cx_Oracle cursor directly, bypassing Django's utilities.
+ self.connection.cursor().execute("SELECT 1 FROM DUAL")
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def oracle_version(self):
+ with self.temporary_connection():
+ version = self.connection.version
+ try:
+ return int(version.split('.')[0])
+ except ValueError:
+ return None
+
+
+class OracleParam(object):
+ """
+ Wrapper object for formatting parameters for Oracle. If the string
+ representation of the value is large enough (greater than 4000 characters)
+ the input size needs to be set as CLOB. Alternatively, if the parameter
+ has an `input_size` attribute, then the value of the `input_size` attribute
+ will be used instead. Otherwise, no input size will be set for the
+ parameter when executing the query.
+ """
+
+ def __init__(self, param, cursor, strings_only=False):
+ # With raw SQL queries, datetimes can reach this function
+ # without being converted by DateTimeField.get_db_prep_value.
+ if settings.USE_TZ and isinstance(param, datetime.datetime):
+ if timezone.is_naive(param):
+ warnings.warn("Oracle received a naive datetime (%s)"
+ " while time zone support is active." % param,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ param = timezone.make_aware(param, default_timezone)
+ param = param.astimezone(timezone.utc).replace(tzinfo=None)
+
+ # Oracle doesn't recognize True and False correctly in Python 3.
+ # The conversion done below works both in 2 and 3.
+ if param is True:
+ param = "1"
+ elif param is False:
+ param = "0"
+ if hasattr(param, 'bind_parameter'):
+ self.force_bytes = param.bind_parameter(cursor)
+ elif isinstance(param, six.memoryview):
+ self.force_bytes = param
+ else:
+ self.force_bytes = convert_unicode(param, cursor.charset,
+ strings_only)
+ if hasattr(param, 'input_size'):
+ # If parameter has `input_size` attribute, use that.
+ self.input_size = param.input_size
+ elif isinstance(param, six.string_types) and len(param) > 4000:
+ # Mark any string param greater than 4000 characters as a CLOB.
+ self.input_size = Database.CLOB
+ else:
+ self.input_size = None
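+ # For example, wrapping a 5000-character string sets input_size to
+ # Database.CLOB, so the cursor binds it as a LOB rather than as a
+ # VARCHAR2, which is capped at 4000 bytes.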
+
+
+class VariableWrapper(object):
+ """
+ An adapter class for cursor variables that prevents the wrapped object
+ from being converted into a string when used to instantiate an OracleParam.
+ This can be used generally for any other object that should be passed into
+ Cursor.execute as-is.
+ """
+
+ def __init__(self, var):
+ self.var = var
+
+ def bind_parameter(self, cursor):
+ return self.var
+
+ def __getattr__(self, key):
+ return getattr(self.var, key)
+
+ def __setattr__(self, key, value):
+ if key == 'var':
+ self.__dict__[key] = value
+ else:
+ setattr(self.var, key, value)
+
+
+class InsertIdVar(object):
+ """
+ A late-binding cursor variable that can be passed to Cursor.execute
+ as a parameter, in order to receive the id of the row created by an
+ insert statement.
+ """
+
+ def bind_parameter(self, cursor):
+ param = cursor.cursor.var(Database.NUMBER)
+ cursor._insert_id_var = param
+ return param
+
+
+class FormatStylePlaceholderCursor(object):
+ """
+ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
+ style. This fixes it -- but note that if you want to use a literal "%s" in
+ a query, you'll need to use "%%s".
+
+ We also do automatic conversion between Unicode on the Python side and
+ UTF-8 -- for talking to Oracle -- in here.
+ """
+ charset = 'utf-8'
+
+ def __init__(self, connection):
+ self.cursor = connection.cursor()
+ # Necessary to retrieve decimal values without rounding error.
+ self.cursor.numbersAsStrings = True
+ # Default arraysize of 1 is highly sub-optimal.
+ self.cursor.arraysize = 100
+
+ def _format_params(self, params):
+ # Wrap every parameter in OracleParam; params may be a dict (named
+ # parameters) or a sequence (positional parameters).
+ try:
+ return dict((k, OracleParam(v, self, True)) for k, v in params.items())
+ except AttributeError:
+ return tuple([OracleParam(p, self, True) for p in params])
+
+ def _guess_input_sizes(self, params_list):
+ # Try dict handling; if that fails, treat as sequence
+ if hasattr(params_list[0], 'keys'):
+ sizes = {}
+ for params in params_list:
+ for k, value in params.items():
+ if value.input_size:
+ sizes[k] = value.input_size
+ self.setinputsizes(**sizes)
+ else:
+ # It's not a list of dicts; it's a list of sequences
+ sizes = [None] * len(params_list[0])
+ for params in params_list:
+ for i, value in enumerate(params):
+ if value.input_size:
+ sizes[i] = value.input_size
+ self.setinputsizes(*sizes)
+
+ def _param_generator(self, params):
+ # Try dict handling; if that fails, treat as sequence
+ if hasattr(params, 'items'):
+ return dict((k, v.force_bytes) for k, v in params.items())
+ else:
+ return [p.force_bytes for p in params]
+
+ def _fix_for_params(self, query, params):
+ # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
+ # does want a trailing ';' but not a trailing '/'. However, these
+ # characters must be included in the original query in case the query
+ # is being passed to SQL*Plus.
+ if query.endswith(';') or query.endswith('/'):
+ query = query[:-1]
+ if params is None:
+ params = []
+ query = convert_unicode(query, self.charset)
+ elif hasattr(params, 'keys'):
+ # Handle params as dict
+ args = dict((k, ":%s"%k) for k in params.keys())
+ query = convert_unicode(query % args, self.charset)
+ else:
+ # Handle params as sequence
+ args = [(':arg%d' % i) for i in range(len(params))]
+ query = convert_unicode(query % tuple(args), self.charset)
+ return query, self._format_params(params)
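+ # For example, ("SELECT * FROM t WHERE a = %s", ['x']) comes back as
+ # ("SELECT * FROM t WHERE a = :arg0", <params wrapped in OracleParam>),
+ # matching cx_Oracle's named-parameter style.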
+
+ def execute(self, query, params=None):
+ query, params = self._fix_for_params(query, params)
+ self._guess_input_sizes([params])
+ try:
+ return self.cursor.execute(query, self._param_generator(params))
+ except Database.DatabaseError as e:
+ # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
+ if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def executemany(self, query, params=None):
+ if not params:
+ # No params given, nothing to do
+ return None
+ # Uniform treatment for sequences and iterables
+ params_iter = iter(params)
+ query, firstparams = self._fix_for_params(query, next(params_iter))
+ # We build a list of formatted params; as we're going to traverse it
+ # more than once, we can't make it lazy by using a generator.
+ formatted = [firstparams] + [self._format_params(p) for p in params_iter]
+ self._guess_input_sizes(formatted)
+ try:
+ return self.cursor.executemany(query,
+ [self._param_generator(p) for p in formatted])
+ except Database.DatabaseError as e:
+ # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
+ if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
+ six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
+ raise
+
+ def fetchone(self):
+ row = self.cursor.fetchone()
+ if row is None:
+ return row
+ return _rowfactory(row, self.cursor)
+
+ def fetchmany(self, size=None):
+ if size is None:
+ size = self.arraysize
+ return tuple([_rowfactory(r, self.cursor)
+ for r in self.cursor.fetchmany(size)])
+
+ def fetchall(self):
+ return tuple([_rowfactory(r, self.cursor)
+ for r in self.cursor.fetchall()])
+
+ def var(self, *args):
+ return VariableWrapper(self.cursor.var(*args))
+
+ def arrayvar(self, *args):
+ return VariableWrapper(self.cursor.arrayvar(*args))
+
+ def __getattr__(self, attr):
+ if attr in self.__dict__:
+ return self.__dict__[attr]
+ else:
+ return getattr(self.cursor, attr)
+
+ def __iter__(self):
+ return CursorIterator(self.cursor)
+
+
+class CursorIterator(six.Iterator):
+
+ """Cursor iterator wrapper that invokes our custom row factory."""
+
+ def __init__(self, cursor):
+ self.cursor = cursor
+ self.iter = iter(cursor)
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return _rowfactory(next(self.iter), self.cursor)
+
+
+def _rowfactory(row, cursor):
+ # Cast numeric values as the appropriate Python type based upon the
+ # cursor description, and convert strings to unicode.
+ casted = []
+ for value, desc in zip(row, cursor.description):
+ if value is not None and desc[1] is Database.NUMBER:
+ precision, scale = desc[4:6]
+ if scale == -127:
+ if precision == 0:
+ # NUMBER column: decimal-precision floating point
+ # This will normally be an integer from a sequence,
+ # but it could be a decimal value.
+ if '.' in value:
+ value = decimal.Decimal(value)
+ else:
+ value = int(value)
+ else:
+ # FLOAT column: binary-precision floating point.
+ # This comes from FloatField columns.
+ value = float(value)
+ elif precision > 0:
+ # NUMBER(p,s) column: decimal-precision fixed point.
+ # This comes from IntegerField and DecimalField columns.
+ if scale == 0:
+ value = int(value)
+ else:
+ value = decimal.Decimal(value)
+ elif '.' in value:
+ # No type information. This normally comes from a
+ # mathematical expression in the SELECT list. Guess int
+ # or Decimal based on whether it has a decimal point.
+ value = decimal.Decimal(value)
+ else:
+ value = int(value)
+ # datetimes are returned as TIMESTAMP, except the results
+ # of "dates" queries, which are returned as DATETIME.
+ elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if settings.USE_TZ and value is not None and timezone.is_naive(value):
+ value = value.replace(tzinfo=timezone.utc)
+ elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
+ Database.LONG_STRING):
+ value = to_unicode(value)
+ casted.append(value)
+ return tuple(casted)
+
+
+def to_unicode(s):
+ """
+ Convert strings to Unicode objects (and return all other data types
+ unchanged).
+ """
+ if isinstance(s, six.string_types):
+ return force_text(s)
+ return s
+
+
+def _get_sequence_reset_sql():
+ # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
+ return """
+DECLARE
+ table_value integer;
+ seq_value integer;
+BEGIN
+ SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
+ SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
+ WHERE sequence_name = '%(sequence)s';
+ WHILE table_value > seq_value LOOP
+ SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
+ END LOOP;
+END;
+/"""
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/client.py b/lib/python2.7/site-packages/django/db/backends/oracle/client.py
new file mode 100644
index 0000000..ccc64eb
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/client.py
@@ -0,0 +1,16 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'sqlplus'
+
+ def runshell(self):
+ conn_string = self.connection._connect_string()
+ args = [self.executable_name, "-L", conn_string]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py b/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py
new file mode 100644
index 0000000..bb8ef59
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/compiler.py
@@ -0,0 +1,72 @@
+from django.db.models.sql import compiler
+from django.utils.six.moves import zip_longest
+
+
+class SQLCompiler(compiler.SQLCompiler):
+ def resolve_columns(self, row, fields=()):
+ # If this query has limit/offset information, then we expect the
+ # first column to be an extra "_RN" column that we need to throw
+ # away.
+ if self.query.high_mark is not None or self.query.low_mark:
+ rn_offset = 1
+ else:
+ rn_offset = 0
+ index_start = rn_offset + len(self.query.extra_select)
+ values = [self.query.convert_values(v, None, connection=self.connection)
+ for v in row[rn_offset:index_start]]
+ for value, field in zip_longest(row[index_start:], fields):
+ values.append(self.query.convert_values(value, field, connection=self.connection))
+ return tuple(values)
+
+ def as_sql(self, with_limits=True, with_col_aliases=False):
+ """
+ Creates the SQL for this query. Returns the SQL string and list
+ of parameters. This is overridden from the original Query class
+ to handle the additional SQL Oracle requires to emulate LIMIT
+ and OFFSET.
+
+ If 'with_limits' is False, any limit/offset information is not
+ included in the query.
+ """
+ if with_limits and self.query.low_mark == self.query.high_mark:
+ return '', ()
+
+ # The `do_offset` flag indicates whether we need to construct
+ # the SQL needed to use limit/offset with Oracle.
+ do_offset = with_limits and (self.query.high_mark is not None
+ or self.query.low_mark)
+ if not do_offset:
+ sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
+ with_col_aliases=with_col_aliases)
+ else:
+ sql, params = super(SQLCompiler, self).as_sql(with_limits=False,
+ with_col_aliases=True)
+
+ # Wrap the base query in an outer SELECT * with boundaries on
+ # the "_RN" column. This is the canonical way to emulate LIMIT
+ # and OFFSET on Oracle.
+ high_where = ''
+ if self.query.high_mark is not None:
+ high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
+ sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.query.low_mark)
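+ # For example, a queryset slice [20:30] wraps the base query as:
+ # SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM
+ # (<base query>) "_SUB" WHERE ROWNUM <= 30) WHERE "_RN" > 20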
+
+ return sql, params
+
+
+class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
+ pass
+
+class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
+ pass
+
+class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
+ pass
+
+class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
+ pass
+
+class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
+ pass
+
+class SQLDateTimeCompiler(compiler.SQLDateTimeCompiler, SQLCompiler):
+ pass
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/creation.py b/lib/python2.7/site-packages/django/db/backends/oracle/creation.py
new file mode 100644
index 0000000..2f2f391
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/creation.py
@@ -0,0 +1,277 @@
+import sys
+import time
+
+from django.conf import settings
+from django.db.backends.creation import BaseDatabaseCreation
+from django.utils.six.moves import input
+
+TEST_DATABASE_PREFIX = 'test_'
+PASSWORD = 'Im_a_lumberjack'
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated Oracle column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ #
+ # Any format strings starting with "qn_" are quoted before being used in the
+ # output (the "qn_" prefix is stripped before the lookup is performed.
+
+ data_types = {
+ 'AutoField': 'NUMBER(11)',
+ 'BinaryField': 'BLOB',
+ 'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
+ 'CharField': 'NVARCHAR2(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
+ 'DateField': 'DATE',
+ 'DateTimeField': 'TIMESTAMP',
+ 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'NVARCHAR2(%(max_length)s)',
+ 'FilePathField': 'NVARCHAR2(%(max_length)s)',
+ 'FloatField': 'DOUBLE PRECISION',
+ 'IntegerField': 'NUMBER(11)',
+ 'BigIntegerField': 'NUMBER(19)',
+ 'IPAddressField': 'VARCHAR2(15)',
+ 'GenericIPAddressField': 'VARCHAR2(39)',
+ 'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
+ 'OneToOneField': 'NUMBER(11)',
+ 'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
+ 'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
+ 'SlugField': 'NVARCHAR2(%(max_length)s)',
+ 'SmallIntegerField': 'NUMBER(11)',
+ 'TextField': 'NCLOB',
+ 'TimeField': 'TIMESTAMP',
+ 'URLField': 'VARCHAR2(%(max_length)s)',
+ }
+
+ def __init__(self, connection):
+ super(DatabaseCreation, self).__init__(connection)
+
+ def _create_test_db(self, verbosity=1, autoclobber=False):
+ TEST_NAME = self._test_database_name()
+ TEST_USER = self._test_database_user()
+ TEST_PASSWD = self._test_database_passwd()
+ TEST_TBLSPACE = self._test_database_tblspace()
+ TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
+
+ parameters = {
+ 'dbname': TEST_NAME,
+ 'user': TEST_USER,
+ 'password': TEST_PASSWD,
+ 'tblspace': TEST_TBLSPACE,
+ 'tblspace_temp': TEST_TBLSPACE_TMP,
+ }
+
+ cursor = self.connection.cursor()
+ if self._test_database_create():
+ try:
+ self._execute_test_db_creation(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error creating the test database: %s\n" % e)
+ if not autoclobber:
+ confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..." % self.connection.alias)
+ self._execute_test_db_destruction(cursor, parameters, verbosity)
+ self._execute_test_db_creation(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error recreating the test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ if self._test_user_create():
+ if verbosity >= 1:
+ print("Creating test user...")
+ try:
+ self._create_test_user(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error creating the test user: %s\n" % e)
+ if not autoclobber:
+ confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
+ if autoclobber or confirm == 'yes':
+ try:
+ if verbosity >= 1:
+ print("Destroying old test user...")
+ self._destroy_test_user(cursor, parameters, verbosity)
+ if verbosity >= 1:
+ print("Creating test user...")
+ self._create_test_user(cursor, parameters, verbosity)
+ except Exception as e:
+ sys.stderr.write("Got an error recreating the test user: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+
+ real_settings = settings.DATABASES[self.connection.alias]
+ real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
+ real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
+ real_settings['TEST_USER'] = real_settings['USER'] = self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
+ real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
+
+ return self.connection.settings_dict['NAME']
+
+ def _destroy_test_db(self, test_database_name, verbosity=1):
+ """
+ Destroy a test database, prompting the user for confirmation if the
+ database already exists.
+ """
+ TEST_NAME = self._test_database_name()
+ TEST_USER = self._test_database_user()
+ TEST_PASSWD = self._test_database_passwd()
+ TEST_TBLSPACE = self._test_database_tblspace()
+ TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
+
+ self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
+ self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
+
+ parameters = {
+ 'dbname': TEST_NAME,
+ 'user': TEST_USER,
+ 'password': TEST_PASSWD,
+ 'tblspace': TEST_TBLSPACE,
+ 'tblspace_temp': TEST_TBLSPACE_TMP,
+ }
+
+ cursor = self.connection.cursor()
+ time.sleep(1) # To avoid "database is being accessed by other users" errors.
+ if self._test_user_create():
+ if verbosity >= 1:
+ print('Destroying test user...')
+ self._destroy_test_user(cursor, parameters, verbosity)
+ if self._test_database_create():
+ if verbosity >= 1:
+ print('Destroying test database tables...')
+ self._execute_test_db_destruction(cursor, parameters, verbosity)
+ self.connection.close()
+
+ def _execute_test_db_creation(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_create_test_db(): dbname = %s" % parameters['dbname'])
+ statements = [
+ """CREATE TABLESPACE %(tblspace)s
+ DATAFILE '%(tblspace)s.dbf' SIZE 20M
+ REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
+ """,
+ """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
+ TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
+ REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
+ """,
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _create_test_user(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_create_test_user(): username = %s" % parameters['user'])
+ statements = [
+ """CREATE USER %(user)s
+ IDENTIFIED BY %(password)s
+ DEFAULT TABLESPACE %(tblspace)s
+ TEMPORARY TABLESPACE %(tblspace_temp)s
+ QUOTA UNLIMITED ON %(tblspace)s
+ """,
+ """GRANT CONNECT, RESOURCE TO %(user)s""",
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _execute_test_db_destruction(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
+ statements = [
+ 'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
+ 'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _destroy_test_user(self, cursor, parameters, verbosity):
+ if verbosity >= 2:
+ print("_destroy_test_user(): user=%s" % parameters['user'])
+ print("Be patient. This can take some time...")
+ statements = [
+ 'DROP USER %(user)s CASCADE',
+ ]
+ self._execute_statements(cursor, statements, parameters, verbosity)
+
+ def _execute_statements(self, cursor, statements, parameters, verbosity):
+ for template in statements:
+ stmt = template % parameters
+ if verbosity >= 2:
+ print(stmt)
+ try:
+ cursor.execute(stmt)
+ except Exception as err:
+ sys.stderr.write("Failed (%s)\n" % (err))
+ raise
+
+ def _test_database_name(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
+ try:
+ if self.connection.settings_dict['TEST_NAME']:
+ name = self.connection.settings_dict['TEST_NAME']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_create(self):
+ return self.connection.settings_dict.get('TEST_CREATE', True)
+
+ def _test_user_create(self):
+ return self.connection.settings_dict.get('TEST_USER_CREATE', True)
+
+ def _test_database_user(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
+ try:
+ if self.connection.settings_dict['TEST_USER']:
+ name = self.connection.settings_dict['TEST_USER']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_passwd(self):
+ name = PASSWORD
+ try:
+ if self.connection.settings_dict['TEST_PASSWD']:
+ name = self.connection.settings_dict['TEST_PASSWD']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_tblspace(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
+ try:
+ if self.connection.settings_dict['TEST_TBLSPACE']:
+ name = self.connection.settings_dict['TEST_TBLSPACE']
+ except KeyError:
+ pass
+ return name
+
+ def _test_database_tblspace_tmp(self):
+ name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
+ try:
+ if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
+ name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
+ except KeyError:
+ pass
+ return name
+
+ def _get_test_db_name(self):
+ """
+ We need to return the 'production' DB name to get the test DB creation
+ machinery to work. This isn't a problem in this case because DB
+ names as handled by Django don't have real counterparts in Oracle.
+ """
+ return self.connection.settings_dict['NAME']
+
+ def test_db_signature(self):
+ settings_dict = self.connection.settings_dict
+ return (
+ settings_dict['HOST'],
+ settings_dict['PORT'],
+ settings_dict['ENGINE'],
+ settings_dict['NAME'],
+ self._test_database_user(),
+ )
diff --git a/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py b/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py
new file mode 100644
index 0000000..3ea3a08
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/oracle/introspection.py
@@ -0,0 +1,138 @@
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+import cx_Oracle
+import re
+
+foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ # Maps type objects to Django Field types.
+ data_types_reverse = {
+ cx_Oracle.BLOB: 'BinaryField',
+ cx_Oracle.CLOB: 'TextField',
+ cx_Oracle.DATETIME: 'DateField',
+ cx_Oracle.FIXED_CHAR: 'CharField',
+ cx_Oracle.NCLOB: 'TextField',
+ cx_Oracle.NUMBER: 'DecimalField',
+ cx_Oracle.STRING: 'CharField',
+ cx_Oracle.TIMESTAMP: 'DateTimeField',
+ }
+
+ try:
+ data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
+ except AttributeError:
+ pass
+
+ try:
+ data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
+ except AttributeError:
+ pass
+
+ def get_field_type(self, data_type, description):
+ # If it's a NUMBER with scale == 0, consider it an IntegerField
+ if data_type == cx_Oracle.NUMBER:
+ precision, scale = description[4:6]
+ if scale == 0:
+ if precision > 11:
+ return 'BigIntegerField'
+ elif precision == 1:
+ return 'BooleanField'
+ else:
+ return 'IntegerField'
+ elif scale == -127:
+ return 'FloatField'
+
+ return super(DatabaseIntrospection, self).get_field_type(data_type, description)
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("SELECT TABLE_NAME FROM USER_TABLES")
+ return [row[0].lower() for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ cursor.execute("SELECT * FROM %s WHERE ROWNUM < 2" % self.connection.ops.quote_name(table_name))
+ description = []
+ for desc in cursor.description:
+ name = force_text(desc[0]) # cx_Oracle always returns a 'str' on both Python 2 and 3
+ name = name % {} # cx_Oracle, for some reason, doubles percent signs.
+ description.append(FieldInfo(*(name.lower(),) + desc[1:]))
+ return description
+
+ def table_name_converter(self, name):
+ "Table name comparison is case insensitive under Oracle"
+ return name.lower()
+
+ def _name_to_index(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_name: field_index} for the given table.
+ Indexes are 0-based.
+ """
+ return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ table_name = table_name.upper()
+ cursor.execute("""
+ SELECT ta.column_id - 1, tb.table_name, tb.column_id - 1
+ FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
+ user_tab_cols ta, user_tab_cols tb
+ WHERE user_constraints.table_name = %s AND
+ ta.table_name = user_constraints.table_name AND
+ ta.column_name = ca.column_name AND
+ ca.table_name = ta.table_name AND
+ user_constraints.constraint_name = ca.constraint_name AND
+ user_constraints.r_constraint_name = cb.constraint_name AND
+ cb.table_name = tb.table_name AND
+ cb.column_name = tb.column_name AND
+ ca.position = cb.position""", [table_name])
+
+ relations = {}
+ for row in cursor.fetchall():
+ relations[row[0]] = (row[2], row[1].lower())
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ cursor.execute("""
+ SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
+ FROM user_constraints c
+ JOIN user_cons_columns ccol
+ ON ccol.constraint_name = c.constraint_name
+ JOIN user_cons_columns rcol
+ ON rcol.constraint_name = c.r_constraint_name
+ WHERE c.table_name = %s AND c.constraint_type = 'R'""" , [table_name.upper()])
+ return [tuple(cell.lower() for cell in row)
+ for row in cursor.fetchall()]
+
+ def get_indexes(self, cursor, table_name):
+ sql = """
+ SELECT LOWER(uic1.column_name) AS column_name,
+ CASE user_constraints.constraint_type
+ WHEN 'P' THEN 1 ELSE 0
+ END AS is_primary_key,
+ CASE user_indexes.uniqueness
+ WHEN 'UNIQUE' THEN 1 ELSE 0
+ END AS is_unique
+ FROM user_constraints, user_indexes, user_ind_columns uic1
+ WHERE user_constraints.constraint_type (+) = 'P'
+ AND user_constraints.index_name (+) = uic1.index_name
+ AND user_indexes.uniqueness (+) = 'UNIQUE'
+ AND user_indexes.index_name (+) = uic1.index_name
+ AND uic1.table_name = UPPER(%s)
+ AND uic1.column_position = 1
+ AND NOT EXISTS (
+ SELECT 1
+ FROM user_ind_columns uic2
+ WHERE uic2.index_name = uic1.index_name
+ AND uic2.column_position = 2
+ )
+ """
+ cursor.execute(sql, [table_name])
+ indexes = {}
+ for row in cursor.fetchall():
+ indexes[row[0]] = {'primary_key': bool(row[1]),
+ 'unique': bool(row[2])}
+ return indexes
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py
new file mode 100644
index 0000000..9aa8b47
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py
@@ -0,0 +1,184 @@
+"""
+PostgreSQL database backend for Django.
+
+Requires psycopg 2: http://initd.org/projects/psycopg2
+"""
+
+import sys
+
+from django.db.backends import *
+from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
+from django.db.backends.postgresql_psycopg2.client import DatabaseClient
+from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
+from django.db.backends.postgresql_psycopg2.version import get_version
+from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
+from django.utils.encoding import force_str
+from django.utils.functional import cached_property
+from django.utils.safestring import SafeText, SafeBytes
+from django.utils.timezone import utc
+
+try:
+ import psycopg2 as Database
+ import psycopg2.extensions
+except ImportError as e:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
+psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
+
+def utc_tzinfo_factory(offset):
+ if offset != 0:
+ raise AssertionError("database connection isn't set to UTC")
+ return utc
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ needs_datetime_string_cast = False
+ can_return_id_from_insert = True
+ requires_rollback_on_dirty_transaction = True
+ has_real_datatype = True
+ can_defer_constraint_checks = True
+ has_select_for_update = True
+ has_select_for_update_nowait = True
+ has_bulk_insert = True
+ uses_savepoints = True
+ supports_tablespaces = True
+ supports_transactions = True
+ can_distinct_on_fields = True
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'postgresql'
+ operators = {
+ 'exact': '= %s',
+ 'iexact': '= UPPER(%s)',
+ 'contains': 'LIKE %s',
+ 'icontains': 'LIKE UPPER(%s)',
+ 'regex': '~ %s',
+ 'iregex': '~* %s',
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': 'LIKE %s',
+ 'endswith': 'LIKE %s',
+ 'istartswith': 'LIKE UPPER(%s)',
+ 'iendswith': 'LIKE UPPER(%s)',
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ opts = self.settings_dict["OPTIONS"]
+ RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
+ self.isolation_level = opts.get('isolation_level', RC)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def get_connection_params(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['NAME']:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured(
+ "settings.DATABASES is improperly configured. "
+ "Please supply the NAME value.")
+ conn_params = {
+ 'database': settings_dict['NAME'],
+ }
+ conn_params.update(settings_dict['OPTIONS'])
+ if 'autocommit' in conn_params:
+ del conn_params['autocommit']
+ if 'isolation_level' in conn_params:
+ del conn_params['isolation_level']
+ if settings_dict['USER']:
+ conn_params['user'] = settings_dict['USER']
+ if settings_dict['PASSWORD']:
+ conn_params['password'] = force_str(settings_dict['PASSWORD'])
+ if settings_dict['HOST']:
+ conn_params['host'] = settings_dict['HOST']
+ if settings_dict['PORT']:
+ conn_params['port'] = settings_dict['PORT']
+ return conn_params
+
+ def get_new_connection(self, conn_params):
+ return Database.connect(**conn_params)
+
+ def init_connection_state(self):
+ settings_dict = self.settings_dict
+ self.connection.set_client_encoding('UTF8')
+ tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
+ if tz:
+ try:
+ get_parameter_status = self.connection.get_parameter_status
+ except AttributeError:
+ # psycopg2 < 2.0.12 doesn't have get_parameter_status
+ conn_tz = None
+ else:
+ conn_tz = get_parameter_status('TimeZone')
+
+ if conn_tz != tz:
+ self.connection.cursor().execute(
+ self.ops.set_time_zone_sql(), [tz])
+ # Commit after setting the time zone (see #17062)
+ self.connection.commit()
+ self.connection.set_isolation_level(self.isolation_level)
+
+ def create_cursor(self):
+ cursor = self.connection.cursor()
+ cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
+ return cursor
+
+ def _set_isolation_level(self, isolation_level):
+ assert isolation_level in range(1, 5) # Use set_autocommit for level = 0
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.set_session(isolation_level=isolation_level)
+ else:
+ self.connection.set_isolation_level(isolation_level)
+
+ def _set_autocommit(self, autocommit):
+ with self.wrap_database_errors:
+ if self.psycopg2_version >= (2, 4, 2):
+ self.connection.autocommit = autocommit
+ else:
+ if autocommit:
+ level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+ else:
+ level = self.isolation_level
+ self.connection.set_isolation_level(level)
+
+ def check_constraints(self, table_names=None):
+ """
+ To check constraints, we set them to immediate. Then, when we're
+ done, we must ensure they are returned to deferred.
+ """
+ self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
+ self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
+
+ def is_usable(self):
+ try:
+ # Use a psycopg cursor directly, bypassing Django's utilities.
+ self.connection.cursor().execute("SELECT 1")
+ except Database.Error:
+ return False
+ else:
+ return True
+
+ @cached_property
+ def psycopg2_version(self):
+ version = psycopg2.__version__.split(' ', 1)[0]
+ return tuple(int(v) for v in version.split('.'))
+
+ @cached_property
+ def pg_version(self):
+ with self.temporary_connection():
+ return get_version(self.connection)
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py
new file mode 100644
index 0000000..a5c0296
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/client.py
@@ -0,0 +1,23 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'psql'
+
+ def runshell(self):
+ settings_dict = self.connection.settings_dict
+ args = [self.executable_name]
+ if settings_dict['USER']:
+ args += ["-U", settings_dict['USER']]
+ if settings_dict['HOST']:
+ args.extend(["-h", settings_dict['HOST']])
+ if settings_dict['PORT']:
+ args.extend(["-p", str(settings_dict['PORT'])])
+ args += [settings_dict['NAME']]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py
new file mode 100644
index 0000000..d4260e0
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/creation.py
@@ -0,0 +1,77 @@
+from django.db.backends.creation import BaseDatabaseCreation
+from django.db.backends.util import truncate_name
+
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # This dictionary maps Field objects to their associated PostgreSQL column
+ # types, as strings. Column-type strings can contain format strings; they'll
+ # be interpolated against the values of Field.__dict__ before being output.
+ # If a column type is set to None, it won't be included in the output.
+ data_types = {
+ 'AutoField': 'serial',
+ 'BinaryField': 'bytea',
+ 'BooleanField': 'boolean',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'timestamp with time zone',
+ 'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'double precision',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'inet',
+ 'GenericIPAddressField': 'inet',
+ 'NullBooleanField': 'boolean',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
+ 'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'text',
+ 'TimeField': 'time',
+ }
+
+ def sql_table_creation_suffix(self):
+ assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
+ if self.connection.settings_dict['TEST_CHARSET']:
+ return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
+ return ''
+
+ def sql_indexes_for_field(self, model, f, style):
+ output = []
+ if f.db_index or f.unique:
+ qn = self.connection.ops.quote_name
+ db_table = model._meta.db_table
+ tablespace = f.db_tablespace or model._meta.db_tablespace
+ if tablespace:
+ tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
+ if tablespace_sql:
+ tablespace_sql = ' ' + tablespace_sql
+ else:
+ tablespace_sql = ''
+
+ def get_index_sql(index_name, opclass=''):
+ return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
+ style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + ' ' +
+ style.SQL_KEYWORD('ON') + ' ' +
+ style.SQL_TABLE(qn(db_table)) + ' ' +
+ "(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
+ "%s;" % tablespace_sql)
+
+ if not f.unique:
+ output = [get_index_sql('%s_%s' % (db_table, f.column))]
+
+ # Fields with database column types of `varchar` and `text` need
+ # a second index that specifies their operator class, which is
+ # needed when performing correct LIKE queries outside the
+ # C locale. See #12234.
+ db_type = f.db_type(connection=self.connection)
+ if db_type.startswith('varchar'):
+ output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
+ ' varchar_pattern_ops'))
+ elif db_type.startswith('text'):
+ output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
+ ' text_pattern_ops'))
+ return output
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
new file mode 100644
index 0000000..ea4e3e1
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/introspection.py
@@ -0,0 +1,111 @@
+from __future__ import unicode_literals
+
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+from django.utils.encoding import force_text
+
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ # Maps type codes to Django Field types.
+ data_types_reverse = {
+ 16: 'BooleanField',
+ 17: 'BinaryField',
+ 20: 'BigIntegerField',
+ 21: 'SmallIntegerField',
+ 23: 'IntegerField',
+ 25: 'TextField',
+ 700: 'FloatField',
+ 701: 'FloatField',
+ 869: 'GenericIPAddressField',
+ 1042: 'CharField', # blank-padded
+ 1043: 'CharField',
+ 1082: 'DateField',
+ 1083: 'TimeField',
+ 1114: 'DateTimeField',
+ 1184: 'DateTimeField',
+ 1266: 'TimeField',
+ 1700: 'DecimalField',
+ }
+
+ ignored_tables = []
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ cursor.execute("""
+ SELECT c.relname
+ FROM pg_catalog.pg_class c
+ LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
+ WHERE c.relkind IN ('r', 'v', '')
+ AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
+ AND pg_catalog.pg_table_is_visible(c.oid)""")
+ return [row[0] for row in cursor.fetchall() if row[0] not in self.ignored_tables]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ # As cursor.description does not return reliably the nullable property,
+ # we have to query the information_schema (#7783)
+ cursor.execute("""
+ SELECT column_name, is_nullable
+ FROM information_schema.columns
+ WHERE table_name = %s""", [table_name])
+ null_map = dict(cursor.fetchall())
+ cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
+ return [FieldInfo(*((force_text(line[0]),) + line[1:6] + (null_map[force_text(line[0])]=='YES',)))
+ for line in cursor.description]
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+ cursor.execute("""
+ SELECT con.conkey, con.confkey, c2.relname
+ FROM pg_constraint con, pg_class c1, pg_class c2
+ WHERE c1.oid = con.conrelid
+ AND c2.oid = con.confrelid
+ AND c1.relname = %s
+ AND con.contype = 'f'""", [table_name])
+ relations = {}
+ for row in cursor.fetchall():
+ # row[0] and row[1] are single-item lists, so grab the single item.
+ relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ key_columns = []
+ cursor.execute("""
+ SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
+ FROM information_schema.constraint_column_usage ccu
+ LEFT JOIN information_schema.key_column_usage kcu
+ ON ccu.constraint_catalog = kcu.constraint_catalog
+ AND ccu.constraint_schema = kcu.constraint_schema
+ AND ccu.constraint_name = kcu.constraint_name
+ LEFT JOIN information_schema.table_constraints tc
+ ON ccu.constraint_catalog = tc.constraint_catalog
+ AND ccu.constraint_schema = tc.constraint_schema
+ AND ccu.constraint_name = tc.constraint_name
+ WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""" , [table_name])
+ key_columns.extend(cursor.fetchall())
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ # This query retrieves each index on the given table, including the
+ # first associated field name
+ cursor.execute("""
+ SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
+ FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
+ pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
+ WHERE c.oid = idx.indrelid
+ AND idx.indexrelid = c2.oid
+ AND attr.attrelid = c.oid
+ AND attr.attnum = idx.indkey[0]
+ AND c.relname = %s""", [table_name])
+ indexes = {}
+ for row in cursor.fetchall():
+ # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
+ # a string of space-separated integers. This designates the field
+ # indexes (1-based) of the fields that have indexes on the table.
+ # Here, we skip any indexes across multiple fields.
+ if ' ' in row[1]:
+ continue
+ indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
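+ # e.g. (illustrative) a hypothetical single-column unique index on
+ # "username" yields indexes['username'] = {'primary_key': False,
+ # 'unique': True}.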
+ return indexes
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py
new file mode 100644
index 0000000..c5aab84
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/operations.py
@@ -0,0 +1,222 @@
+from __future__ import unicode_literals
+
+from django.conf import settings
+from django.db.backends import BaseDatabaseOperations
+
+
+class DatabaseOperations(BaseDatabaseOperations):
+ def __init__(self, connection):
+ super(DatabaseOperations, self).__init__(connection)
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
+ if lookup_type == 'week_day':
+ # For consistency across backends, we return Sunday=1, Saturday=7.
+ return "EXTRACT('dow' FROM %s) + 1" % field_name
+ else:
+ return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ """
+ Implements the interval functionality for expressions.
+ The format for PostgreSQL is:
+ (datefield + interval '3 days 200 seconds 5 microseconds')
+ """
+ modifiers = []
+ if timedelta.days:
+ modifiers.append('%s days' % timedelta.days)
+ if timedelta.seconds:
+ modifiers.append('%s seconds' % timedelta.seconds)
+ if timedelta.microseconds:
+ modifiers.append('%s microseconds' % timedelta.microseconds)
+ mods = ' '.join(modifiers)
+ conn = ' %s ' % connector
+ return '(%s)' % conn.join([sql, 'interval \'%s\'' % mods])
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
+ return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "%s AT TIME ZONE %%s" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
+ if lookup_type == 'week_day':
+ # For consistency across backends, we return Sunday=1, Saturday=7.
+ sql = "EXTRACT('dow' FROM %s) + 1" % field_name
+ else:
+ sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
+ return sql, params
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ if settings.USE_TZ:
+ field_name = "%s AT TIME ZONE %%s" % field_name
+ params = [tzname]
+ else:
+ params = []
+ # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
+ sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
+ return sql, params
+
+ def deferrable_sql(self):
+ return " DEFERRABLE INITIALLY DEFERRED"
+
+ def lookup_cast(self, lookup_type):
+ lookup = '%s'
+
+ # Cast text lookups to text to allow things like filter(x__contains=4)
+ if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
+ 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
+ lookup = "%s::text"
+
+ # Use UPPER(x) for case-insensitive lookups; it's faster.
+ if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
+ lookup = 'UPPER(%s)' % lookup
+
+ return lookup
+
+ def field_cast_sql(self, db_type, internal_type):
+ if internal_type == "GenericIPAddressField" or internal_type == "IPAddressField":
+ return 'HOST(%s)'
+ return '%s'
+
+ def last_insert_id(self, cursor, table_name, pk_name):
+ # Use pg_get_serial_sequence to get the underlying sequence name
+ # from the table name and column name (available since PostgreSQL 8)
+ cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
+ self.quote_name(table_name), pk_name))
+ return cursor.fetchone()[0]
+
+ def no_limit_value(self):
+ return None
+
+ def quote_name(self, name):
+ if name.startswith('"') and name.endswith('"'):
+ return name # Quoting once is enough.
+ return '"%s"' % name
+
+ def set_time_zone_sql(self):
+ return "SET TIME ZONE %s"
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ if tables:
+ # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
+ # us to truncate tables referenced by a foreign key in any other
+ # table.
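+ # e.g. (illustrative, ignoring terminal styling) for hypothetical
+ # tables ['a', 'b'] with allow_cascade=True, the first statement is
+ # 'TRUNCATE "a", "b" CASCADE;'.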
+ tables_sql = ', '.join(
+ style.SQL_FIELD(self.quote_name(table)) for table in tables)
+ if allow_cascade:
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ tables_sql,
+ style.SQL_KEYWORD('CASCADE'),
+ )]
+ else:
+ sql = ['%s %s;' % (
+ style.SQL_KEYWORD('TRUNCATE'),
+ tables_sql,
+ )]
+ sql.extend(self.sequence_reset_by_name_sql(style, sequences))
+ return sql
+ else:
+ return []
+
+ def sequence_reset_by_name_sql(self, style, sequences):
+ # Generates "SELECT setval(pg_get_serial_sequence(...), 1, false);"
+ # style SQL statements to reset sequence indices to 1.
+ sql = []
+ for sequence_info in sequences:
+ table_name = sequence_info['table']
+ column_name = sequence_info['column']
+ if not column_name:
+ # This will be the case if it's an m2m using an autogenerated
+ # intermediate table (see BaseDatabaseIntrospection.sequence_list)
+ column_name = 'id'
+ sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(self.quote_name(table_name)),
+ style.SQL_FIELD(column_name))
+ )
+ return sql
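+ # e.g. (illustrative) a sequence entry {'table': 'book', 'column': 'id'}
+ # yields: SELECT setval(pg_get_serial_sequence('"book"','id'), 1, false);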
+
+ def tablespace_sql(self, tablespace, inline=False):
+ if inline:
+ return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
+ else:
+ return "TABLESPACE %s" % self.quote_name(tablespace)
+
+ def sequence_reset_sql(self, style, model_list):
+ from django.db import models
+ output = []
+ qn = self.quote_name
+ for model in model_list:
+ # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
+ # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
+ # if there are records (as the max pk value is already in use), otherwise set it to false.
+ # Use pg_get_serial_sequence to get the underlying sequence name from the table name
+ # and column name (available since PostgreSQL 8)
+
+ for f in model._meta.local_fields:
+ if isinstance(f, models.AutoField):
+ output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(qn(model._meta.db_table)),
+ style.SQL_FIELD(f.column),
+ style.SQL_FIELD(qn(f.column)),
+ style.SQL_FIELD(qn(f.column)),
+ style.SQL_KEYWORD('IS NOT'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_TABLE(qn(model._meta.db_table))))
+ break # Only one AutoField is allowed per model, so don't bother continuing.
+ for f in model._meta.many_to_many:
+ if not f.rel.through:
+ output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" % \
+ (style.SQL_KEYWORD('SELECT'),
+ style.SQL_TABLE(qn(f.m2m_db_table())),
+ style.SQL_FIELD('id'),
+ style.SQL_FIELD(qn('id')),
+ style.SQL_FIELD(qn('id')),
+ style.SQL_KEYWORD('IS NOT'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_TABLE(qn(f.m2m_db_table()))))
+ return output
+
+ def prep_for_iexact_query(self, x):
+ return x
+
+ def max_name_length(self):
+ """
+ Returns the maximum length of an identifier.
+
+ Note that the maximum length of an identifier is 63 by default, but can
+ be changed by recompiling PostgreSQL after editing the NAMEDATALEN
+ macro in src/include/pg_config_manual.h.
+
+ This implementation simply returns 63, but can easily be overridden by a
+ custom database backend that inherits most of its behavior from this one.
+ """
+
+ return 63
+
+ def distinct_sql(self, fields):
+ if fields:
+ return 'DISTINCT ON (%s)' % ', '.join(fields)
+ else:
+ return 'DISTINCT'
+
+ def last_executed_query(self, cursor, sql, params):
+ # http://initd.org/psycopg/docs/cursor.html#cursor.query
+ # The query attribute is a Psycopg extension to the DB API 2.0.
+ if cursor.query is not None:
+ return cursor.query.decode('utf-8')
+ return None
+
+ def return_insert_id(self):
+ return "RETURNING %s", ()
+
+ def bulk_insert_sql(self, fields, num_values):
+ items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
+ return "VALUES " + ", ".join([items_sql] * num_values)
diff --git a/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py
new file mode 100644
index 0000000..8ef5167
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py
@@ -0,0 +1,43 @@
+"""
+Extracts the version of the PostgreSQL server.
+"""
+
+import re
+
+# This regular expression is intentionally fairly flexible.
+# Needs to be able to handle stuff like:
+# PostgreSQL 8.3.6
+# EnterpriseDB 8.3
+# PostgreSQL 8.3 beta4
+# PostgreSQL 8.4beta1
+VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
+
+
+def _parse_version(text):
+ "Internal parsing method. Factored out for testing purposes."
+ major, major2, minor = VERSION_RE.search(text).groups()
+ try:
+ return int(major) * 10000 + int(major2) * 100 + int(minor)
+ except (ValueError, TypeError):
+ return int(major) * 10000 + int(major2) * 100
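+# Illustrative examples (not from the source): _parse_version(
+# 'PostgreSQL 8.3.6 on x86_64') returns 80306, while 'PostgreSQL 8.4beta1'
+# has no third group and falls back to 80400.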
+
+def get_version(connection):
+ """
+ Returns an integer representing the major, minor and revision number of the
+ server. Format is the one used for the return value of libpq
+ PQServerVersion()/``server_version`` connection attribute (available in
+ newer psycopg2 versions).
+
+ For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
+ releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
+ prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
+
+ PQServerVersion()/``server_version`` doesn't execute a query, so try that
+ first, then fall back to a ``SELECT version()`` query.
+ """
+ if hasattr(connection, 'server_version'):
+ return connection.server_version
+ else:
+ cursor = connection.cursor()
+ cursor.execute("SELECT version()")
+ return _parse_version(cursor.fetchone()[0])
diff --git a/lib/python2.7/site-packages/django/db/backends/signals.py b/lib/python2.7/site-packages/django/db/backends/signals.py
new file mode 100644
index 0000000..c16a63f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/signals.py
@@ -0,0 +1,3 @@
+from django.dispatch import Signal
+
+connection_created = Signal(providing_args=["connection"])
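+# Usage sketch (illustrative; receiver name is hypothetical). Receivers
+# are passed the new connection as a keyword argument:
+#   def on_created(sender, connection, **kwargs): ...
+#   connection_created.connect(on_created)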
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/__init__.py
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py
new file mode 100644
index 0000000..a219178
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/base.py
@@ -0,0 +1,533 @@
+"""
+SQLite3 backend for django.
+
+Works with either the pysqlite2 module or the sqlite3 module in the
+standard library.
+"""
+from __future__ import unicode_literals
+
+import datetime
+import decimal
+import warnings
+import re
+
+from django.db import utils
+from django.db.backends import *
+from django.db.backends.sqlite3.client import DatabaseClient
+from django.db.backends.sqlite3.creation import DatabaseCreation
+from django.db.backends.sqlite3.introspection import DatabaseIntrospection
+from django.db.models import fields
+from django.db.models.sql import aggregates
+from django.utils.dateparse import parse_date, parse_datetime, parse_time
+from django.utils.encoding import force_text
+from django.utils.functional import cached_property
+from django.utils.safestring import SafeBytes
+from django.utils import six
+from django.utils import timezone
+
+try:
+ try:
+ from pysqlite2 import dbapi2 as Database
+ except ImportError:
+ from sqlite3 import dbapi2 as Database
+except ImportError as exc:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("Error loading either pysqlite2 or sqlite3 modules (tried in that order): %s" % exc)
+
+try:
+ import pytz
+except ImportError:
+ pytz = None
+
+DatabaseError = Database.DatabaseError
+IntegrityError = Database.IntegrityError
+
+def parse_datetime_with_timezone_support(value):
+ dt = parse_datetime(value)
+ # Confirm that dt is naive before overwriting its tzinfo.
+ if dt is not None and settings.USE_TZ and timezone.is_naive(dt):
+ dt = dt.replace(tzinfo=timezone.utc)
+ return dt
+
+def adapt_datetime_with_timezone_support(value):
+ # Equivalent to DateTimeField.get_db_prep_value. Used only by raw SQL.
+ if settings.USE_TZ:
+ if timezone.is_naive(value):
+ warnings.warn("SQLite received a naive datetime (%s)"
+ " while time zone support is active." % value,
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ return value.isoformat(str(" "))
+
+def decoder(conv_func):
+ """ The Python sqlite3 interface returns always byte strings.
+ This function converts the received value to a regular string before
+ passing it to the receiver function.
+ """
+ return lambda s: conv_func(s.decode('utf-8'))
+
+Database.register_converter(str("bool"), decoder(lambda s: s == '1'))
+Database.register_converter(str("time"), decoder(parse_time))
+Database.register_converter(str("date"), decoder(parse_date))
+Database.register_converter(str("datetime"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("timestamp"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("TIMESTAMP"), decoder(parse_datetime_with_timezone_support))
+Database.register_converter(str("decimal"), decoder(util.typecast_decimal))
+
+Database.register_adapter(datetime.datetime, adapt_datetime_with_timezone_support)
+Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
+if six.PY2 and Database.version_info >= (2, 4, 1):
+ # Starting in 2.4.1, the str type is no longer accepted, so we
+ # convert all str objects to Unicode.
+ # As registering an adapter for a primitive type causes a small
+ # slow-down, this adapter is only registered for sqlite3 versions
+ # needing it (the sqlite3 module in Python 2.6 and up).
+ Database.register_adapter(str, lambda s: s.decode('utf-8'))
+ Database.register_adapter(SafeBytes, lambda s: s.decode('utf-8'))
+
+class DatabaseFeatures(BaseDatabaseFeatures):
+ # SQLite cannot handle us only partially reading from a cursor's result set
+ # and then writing the same rows to the database in another cursor. This
+ # setting ensures we always read result sets fully into memory all in one
+ # go.
+ can_use_chunked_reads = False
+ test_db_allows_multiple_connections = False
+ supports_unspecified_pk = True
+ supports_timezones = False
+ supports_1000_query_parameters = False
+ supports_mixed_date_datetime_comparisons = False
+ has_bulk_insert = True
+ can_combine_inserts_with_and_without_auto_increment_pk = False
+ autocommits_when_autocommit_is_off = True
+ atomic_transactions = False
+ supports_paramstyle_pyformat = False
+
+ @cached_property
+ def uses_savepoints(self):
+ return Database.sqlite_version_info >= (3, 6, 8)
+
+ @cached_property
+ def supports_stddev(self):
+ """Confirm support for STDDEV and related stats functions
+
+ SQLite supports STDDEV as an extension package, so
+ connection.ops.check_aggregate_support() can't unilaterally
+ rule out support for STDDEV. We need to manually check
+ whether the call works.
+ """
+ cursor = self.connection.cursor()
+ cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
+ try:
+ cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
+ has_support = True
+ except utils.DatabaseError:
+ has_support = False
+ cursor.execute('DROP TABLE STDDEV_TEST')
+ return has_support
+
+ @cached_property
+ def has_zoneinfo_database(self):
+ return pytz is not None
+
+class DatabaseOperations(BaseDatabaseOperations):
+ def bulk_batch_size(self, fields, objs):
+ """
+ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of
+ 999 variables per query.
+
+ If there is just a single field to insert, then we can hit another
+ limit, SQLITE_MAX_COMPOUND_SELECT, which defaults to 500.
+ """
+ limit = 999 if len(fields) > 1 else 500
+ return (limit // len(fields)) if len(fields) > 0 else len(objs)
+
+ def check_aggregate_support(self, aggregate):
+ bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField)
+ bad_aggregates = (aggregates.Sum, aggregates.Avg,
+ aggregates.Variance, aggregates.StdDev)
+ if (isinstance(aggregate.source, bad_fields) and
+ isinstance(aggregate, bad_aggregates)):
+ raise NotImplementedError(
+ 'You cannot use Sum, Avg, StdDev and Variance aggregations '
+ 'on date/time fields in sqlite3 '
+ 'since date/time is saved as text.')
+
+ def date_extract_sql(self, lookup_type, field_name):
+ # sqlite doesn't support extract, so we fake it with the user-defined
+ # function django_date_extract that's registered in connect(). Note that
+ # single quotes are used because this is a string (and could otherwise
+ # cause a collision with a field name).
+ return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
+
+ def date_interval_sql(self, sql, connector, timedelta):
+ # It would be more straightforward if we could use the sqlite strftime
+ # function, but it does not allow for keeping six digits of fractional
+ # second information, nor does it allow for formatting date and datetime
+ # values differently. So instead we register our own function that
+ # formats the datetime combined with the delta in a manner suitable
+ # for comparisons.
+ return 'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
+ connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
+
+ def date_trunc_sql(self, lookup_type, field_name):
+ # sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
+ # function django_date_trunc that's registered in connect(). Note that
+ # single quotes are used because this is a string (and could otherwise
+ # cause a collision with a field name).
+ return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
+
+ def datetime_extract_sql(self, lookup_type, field_name, tzname):
+ # Same comment as in date_extract_sql.
+ if settings.USE_TZ:
+ if pytz is None:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("This query requires pytz, "
+ "but it isn't installed.")
+ return "django_datetime_extract('%s', %s, %%s)" % (
+ lookup_type.lower(), field_name), [tzname]
+
+ def datetime_trunc_sql(self, lookup_type, field_name, tzname):
+ # Same comment as in date_trunc_sql.
+ if settings.USE_TZ:
+ if pytz is None:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured("This query requires pytz, "
+ "but it isn't installed.")
+ return "django_datetime_trunc('%s', %s, %%s)" % (
+ lookup_type.lower(), field_name), [tzname]
+
+ def drop_foreignkey_sql(self):
+ return ""
+
+ def pk_default_value(self):
+ return "NULL"
+
+ def quote_name(self, name):
+ if name.startswith('"') and name.endswith('"'):
+ return name # Quoting once is enough.
+ return '"%s"' % name
+
+ def no_limit_value(self):
+ return -1
+
+ def sql_flush(self, style, tables, sequences, allow_cascade=False):
+ # NB: The generated SQL below is specific to SQLite. The DELETE
+ # FROM... statements work because SQLite doesn't enforce foreign key
+ # constraints by default.
+ sql = ['%s %s %s;' % (
+ style.SQL_KEYWORD('DELETE'),
+ style.SQL_KEYWORD('FROM'),
+ style.SQL_FIELD(self.quote_name(table))
+ ) for table in tables]
+ # Note: No requirement for reset of auto-incremented indices (cf. other
+ # sql_flush() implementations). Just return the SQL at this point.
+ return sql
+
+ def value_to_db_datetime(self, value):
+ if value is None:
+ return None
+
+ # SQLite doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ if settings.USE_TZ:
+ value = value.astimezone(timezone.utc).replace(tzinfo=None)
+ else:
+ raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.")
+
+ return six.text_type(value)
+
+ def value_to_db_time(self, value):
+ if value is None:
+ return None
+
+ # SQLite doesn't support tz-aware datetimes
+ if timezone.is_aware(value):
+ raise ValueError("SQLite backend does not support timezone-aware times.")
+
+ return six.text_type(value)
+
+ def convert_values(self, value, field):
+ """SQLite returns floats when it should be returning decimals,
+ and gets dates and datetimes wrong.
+ For consistency with other backends, coerce when required.
+ """
+ if value is None:
+ return None
+
+ internal_type = field.get_internal_type()
+ if internal_type == 'DecimalField':
+ return util.typecast_decimal(field.format_number(value))
+ elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
+ return int(value)
+ elif internal_type == 'DateField':
+ return parse_date(value)
+ elif internal_type == 'DateTimeField':
+ return parse_datetime_with_timezone_support(value)
+ elif internal_type == 'TimeField':
+ return parse_time(value)
+
+ # No field, or the field isn't known to be a decimal or integer
+ return value
+
+ def bulk_insert_sql(self, fields, num_values):
+ res = []
+ res.append("SELECT %s" % ", ".join(
+ "%%s AS %s" % self.quote_name(f.column) for f in fields
+ ))
+ res.extend(["UNION ALL SELECT %s" % ", ".join(["%s"] * len(fields))] * (num_values - 1))
+ return " ".join(res)
+
+class DatabaseWrapper(BaseDatabaseWrapper):
+ vendor = 'sqlite'
+ # SQLite requires LIKE statements to include an ESCAPE clause if the value
+ # being escaped has a percent or underscore in it.
+ # See http://www.sqlite.org/lang_expr.html for an explanation.
+ operators = {
+ 'exact': '= %s',
+ 'iexact': "LIKE %s ESCAPE '\\'",
+ 'contains': "LIKE %s ESCAPE '\\'",
+ 'icontains': "LIKE %s ESCAPE '\\'",
+ 'regex': 'REGEXP %s',
+ 'iregex': "REGEXP '(?i)' || %s",
+ 'gt': '> %s',
+ 'gte': '>= %s',
+ 'lt': '< %s',
+ 'lte': '<= %s',
+ 'startswith': "LIKE %s ESCAPE '\\'",
+ 'endswith': "LIKE %s ESCAPE '\\'",
+ 'istartswith': "LIKE %s ESCAPE '\\'",
+ 'iendswith': "LIKE %s ESCAPE '\\'",
+ }
+
+ Database = Database
+
+ def __init__(self, *args, **kwargs):
+ super(DatabaseWrapper, self).__init__(*args, **kwargs)
+
+ self.features = DatabaseFeatures(self)
+ self.ops = DatabaseOperations(self)
+ self.client = DatabaseClient(self)
+ self.creation = DatabaseCreation(self)
+ self.introspection = DatabaseIntrospection(self)
+ self.validation = BaseDatabaseValidation(self)
+
+ def get_connection_params(self):
+ settings_dict = self.settings_dict
+ if not settings_dict['NAME']:
+ from django.core.exceptions import ImproperlyConfigured
+ raise ImproperlyConfigured(
+ "settings.DATABASES is improperly configured. "
+ "Please supply the NAME value.")
+ kwargs = {
+ 'database': settings_dict['NAME'],
+ 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
+ }
+ kwargs.update(settings_dict['OPTIONS'])
+ # Always allow the underlying SQLite connection to be shareable
+ # between multiple threads. The safe-guarding will be handled at a
+ # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
+ # property. This is necessary as the shareability is disabled by
+ # default in pysqlite and it cannot be changed once a connection is
+ # opened.
+ if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
+ warnings.warn(
+ 'The `check_same_thread` option was provided and set to '
+ 'True. It will be overridden with False. Use the '
+ '`DatabaseWrapper.allow_thread_sharing` property instead '
+ 'for controlling thread shareability.',
+ RuntimeWarning
+ )
+ kwargs.update({'check_same_thread': False})
+ return kwargs
+
+ def get_new_connection(self, conn_params):
+ conn = Database.connect(**conn_params)
+ conn.create_function("django_date_extract", 2, _sqlite_date_extract)
+ conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
+ conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
+ conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
+ conn.create_function("regexp", 2, _sqlite_regexp)
+ conn.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
+ return conn
+
+ def init_connection_state(self):
+ pass
+
+ def create_cursor(self):
+ return self.connection.cursor(factory=SQLiteCursorWrapper)
+
+ def close(self):
+ self.validate_thread_sharing()
+ # If database is in memory, closing the connection destroys the
+ # database. To prevent accidental data loss, ignore close requests on
+ # an in-memory db.
+ if self.settings_dict['NAME'] != ":memory:":
+ BaseDatabaseWrapper.close(self)
+
+ def _savepoint_allowed(self):
+ # Two conditions are required here:
+ # - A sufficiently recent version of SQLite to support savepoints,
+ # - Being in a transaction, which can only happen inside 'atomic'.
+
+ # When 'isolation_level' is not None, sqlite3 commits before each
+ # savepoint; it's a bug. When it is None, savepoints don't make sense
+ # because autocommit is enabled. The only exception is inside 'atomic'
+ # blocks. To work around that bug, on SQLite, 'atomic' starts a
+ # transaction explicitly rather than simply disabling autocommit.
+ return self.features.uses_savepoints and self.in_atomic_block
+
+ def _set_autocommit(self, autocommit):
+ if autocommit:
+ level = None
+ else:
+ # sqlite3's internal default is ''. It's different from None.
+ # See Modules/_sqlite/connection.c.
+ level = ''
+ # 'isolation_level' is a misleading API.
+ # SQLite always runs at the SERIALIZABLE isolation level.
+ with self.wrap_database_errors:
+ self.connection.isolation_level = level
+
+ def check_constraints(self, table_names=None):
+ """
+ Checks each table name in `table_names` for rows with invalid foreign key references. This method is
+ intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to
+ determine if rows with invalid references were entered while constraint checks were off.
+
+ Raises an IntegrityError on the first invalid foreign key reference encountered (if any) and provides
+ detailed information about the invalid reference in the error message.
+
+ Backends can override this method if they can more directly apply constraint checking (e.g. via "SET CONSTRAINTS
+ ALL IMMEDIATE")
+ """
+ cursor = self.cursor()
+ if table_names is None:
+ table_names = self.introspection.table_names(cursor)
+ for table_name in table_names:
+ primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
+ if not primary_key_column_name:
+ continue
+ key_columns = self.introspection.get_key_columns(cursor, table_name)
+ for column_name, referenced_table_name, referenced_column_name in key_columns:
+ cursor.execute("""
+ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
+ LEFT JOIN `%s` as REFERRED
+ ON (REFERRING.`%s` = REFERRED.`%s`)
+ WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL"""
+ % (primary_key_column_name, column_name, table_name, referenced_table_name,
+ column_name, referenced_column_name, column_name, referenced_column_name))
+ for bad_row in cursor.fetchall():
+ raise utils.IntegrityError("The row in table '%s' with primary key '%s' has an invalid "
+ "foreign key: %s.%s contains a value '%s' that does not have a corresponding value in %s.%s."
+ % (table_name, bad_row[0], table_name, column_name, bad_row[1],
+ referenced_table_name, referenced_column_name))
+
+ def is_usable(self):
+ return True
+
+ def _start_transaction_under_autocommit(self):
+ """
+ Start a transaction explicitly in autocommit mode.
+
+ Staying in autocommit mode works around a bug of sqlite3 that breaks
+ savepoints when autocommit is disabled.
+ """
+ self.cursor().execute("BEGIN")
+
+FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
+
+class SQLiteCursorWrapper(Database.Cursor):
+ """
+ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
+ This fixes it -- but note that if you want to use a literal "%s" in a query,
+ you'll need to use "%%s".
+ """
+ def execute(self, query, params=None):
+ if params is None:
+ return Database.Cursor.execute(self, query)
+ query = self.convert_query(query)
+ return Database.Cursor.execute(self, query, params)
+
+ def executemany(self, query, param_list):
+ query = self.convert_query(query)
+ return Database.Cursor.executemany(self, query, param_list)
+
+ def convert_query(self, query):
+ return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
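+ # e.g. (illustrative) convert_query('SELECT %s WHERE name = %s')
+ # returns 'SELECT ? WHERE name = ?'; a literal '%%s' survives as '%s'.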
+
+def _sqlite_date_extract(lookup_type, dt):
+ if dt is None:
+ return None
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if lookup_type == 'week_day':
+ return (dt.isoweekday() % 7) + 1
+ else:
+ return getattr(dt, lookup_type)
+
+def _sqlite_date_trunc(lookup_type, dt):
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if lookup_type == 'year':
+ return "%i-01-01" % dt.year
+ elif lookup_type == 'month':
+ return "%i-%02i-01" % (dt.year, dt.month)
+ elif lookup_type == 'day':
+ return "%i-%02i-%02i" % (dt.year, dt.month, dt.day)
+
+def _sqlite_datetime_extract(lookup_type, dt, tzname):
+ if dt is None:
+ return None
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if tzname is not None:
+ dt = timezone.localtime(dt, pytz.timezone(tzname))
+ if lookup_type == 'week_day':
+ return (dt.isoweekday() % 7) + 1
+ else:
+ return getattr(dt, lookup_type)
+
+def _sqlite_datetime_trunc(lookup_type, dt, tzname):
+ try:
+ dt = util.typecast_timestamp(dt)
+ except (ValueError, TypeError):
+ return None
+ if tzname is not None:
+ dt = timezone.localtime(dt, pytz.timezone(tzname))
+ if lookup_type == 'year':
+ return "%i-01-01 00:00:00" % dt.year
+ elif lookup_type == 'month':
+ return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
+ elif lookup_type == 'day':
+ return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
+ elif lookup_type == 'hour':
+ return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
+ elif lookup_type == 'minute':
+ return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
+ elif lookup_type == 'second':
+ return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+
+def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
+ try:
+ dt = util.typecast_timestamp(dt)
+ delta = datetime.timedelta(int(days), int(secs), int(usecs))
+ if conn.strip() == '+':
+ dt = dt + delta
+ else:
+ dt = dt - delta
+ except (ValueError, TypeError):
+ return None
+ # typecast_timestamp returns a date or a datetime without timezone.
+ # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
+ return str(dt)
+
+def _sqlite_regexp(re_pattern, re_string):
+ return bool(re.search(re_pattern, force_text(re_string))) if re_string is not None else False
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py
new file mode 100644
index 0000000..5b5b732
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/client.py
@@ -0,0 +1,16 @@
+import os
+import sys
+
+from django.db.backends import BaseDatabaseClient
+
+class DatabaseClient(BaseDatabaseClient):
+ executable_name = 'sqlite3'
+
+ def runshell(self):
+ args = [self.executable_name,
+ self.connection.settings_dict['NAME']]
+ if os.name == 'nt':
+ sys.exit(os.system(" ".join(args)))
+ else:
+ os.execvp(self.executable_name, args)
+
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py
new file mode 100644
index 0000000..a9fb273
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/creation.py
@@ -0,0 +1,88 @@
+import os
+import sys
+from django.db.backends.creation import BaseDatabaseCreation
+from django.utils.six.moves import input
+
+class DatabaseCreation(BaseDatabaseCreation):
+ # SQLite doesn't actually support most of these types, but it "does the right
+ # thing" given more verbose field definitions, so leave them as is so that
+ # schema inspection is more useful.
+ data_types = {
+ 'AutoField': 'integer',
+ 'BinaryField': 'BLOB',
+ 'BooleanField': 'bool',
+ 'CharField': 'varchar(%(max_length)s)',
+ 'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
+ 'DateField': 'date',
+ 'DateTimeField': 'datetime',
+ 'DecimalField': 'decimal',
+ 'FileField': 'varchar(%(max_length)s)',
+ 'FilePathField': 'varchar(%(max_length)s)',
+ 'FloatField': 'real',
+ 'IntegerField': 'integer',
+ 'BigIntegerField': 'bigint',
+ 'IPAddressField': 'char(15)',
+ 'GenericIPAddressField': 'char(39)',
+ 'NullBooleanField': 'bool',
+ 'OneToOneField': 'integer',
+ 'PositiveIntegerField': 'integer unsigned',
+ 'PositiveSmallIntegerField': 'smallint unsigned',
+ 'SlugField': 'varchar(%(max_length)s)',
+ 'SmallIntegerField': 'smallint',
+ 'TextField': 'text',
+ 'TimeField': 'time',
+ }
+
+ def sql_for_pending_references(self, model, style, pending_references):
+ "SQLite3 doesn't support constraints"
+ return []
+
+ def sql_remove_table_constraints(self, model, references_to_delete, style):
+ "SQLite3 doesn't support constraints"
+ return []
+
+ def _get_test_db_name(self):
+ test_database_name = self.connection.settings_dict['TEST_NAME']
+ if test_database_name and test_database_name != ':memory:':
+ return test_database_name
+ return ':memory:'
+
+ def _create_test_db(self, verbosity, autoclobber):
+ test_database_name = self._get_test_db_name()
+ if test_database_name != ':memory:':
+ # Erase the old test database
+ if verbosity >= 1:
+ print("Destroying old test database '%s'..." % self.connection.alias)
+ if os.access(test_database_name, os.F_OK):
+ if not autoclobber:
+ confirm = input("Type 'yes' if you would like to try deleting the test database '%s', or 'no' to cancel: " % test_database_name)
+ if autoclobber or confirm == 'yes':
+ try:
+ os.remove(test_database_name)
+ except Exception as e:
+ sys.stderr.write("Got an error deleting the old test database: %s\n" % e)
+ sys.exit(2)
+ else:
+ print("Tests cancelled.")
+ sys.exit(1)
+ return test_database_name
+
+ def _destroy_test_db(self, test_database_name, verbosity):
+ if test_database_name and test_database_name != ":memory:":
+ # Remove the SQLite database file
+ os.remove(test_database_name)
+
+ def test_db_signature(self):
+ """
+ Returns a tuple that uniquely identifies a test database.
+
+ This takes into account the special cases of ":memory:" and "" for
+ SQLite since the databases will be distinct despite having the same
+ TEST_NAME. See http://www.sqlite.org/inmemorydb.html
+ """
+ settings_dict = self.connection.settings_dict
+ test_dbname = self._get_test_db_name()
+ sig = [settings_dict['NAME']]
+ if test_dbname == ':memory:':
+ sig.append(self.connection.alias)
+ return tuple(sig)
diff --git a/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py b/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py
new file mode 100644
index 0000000..431e112
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/sqlite3/introspection.py
@@ -0,0 +1,185 @@
+import re
+from django.db.backends import BaseDatabaseIntrospection, FieldInfo
+
+field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')
+
+def get_field_size(name):
+ """ Extract the size number from a "varchar(11)" type name """
+ m = field_size_re.search(name)
+ return int(m.group(1)) if m else None
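+# e.g. (illustrative) get_field_size('varchar(11)') returns 11, while
+# get_field_size('text') returns None.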
+
+
+# This light wrapper "fakes" a dictionary interface, because some SQLite data
+# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
+# as a simple dictionary lookup.
+class FlexibleFieldLookupDict(object):
+ # Maps SQL types to Django Field types. Some of the SQL types have multiple
+ # entries here because SQLite allows for anything and doesn't normalize the
+ # field type; it uses whatever was given.
+ base_data_types_reverse = {
+ 'bool': 'BooleanField',
+ 'boolean': 'BooleanField',
+ 'smallint': 'SmallIntegerField',
+ 'smallint unsigned': 'PositiveSmallIntegerField',
+ 'smallinteger': 'SmallIntegerField',
+ 'int': 'IntegerField',
+ 'integer': 'IntegerField',
+ 'bigint': 'BigIntegerField',
+ 'integer unsigned': 'PositiveIntegerField',
+ 'decimal': 'DecimalField',
+ 'real': 'FloatField',
+ 'text': 'TextField',
+ 'char': 'CharField',
+ 'blob': 'BinaryField',
+ 'date': 'DateField',
+ 'datetime': 'DateTimeField',
+ 'time': 'TimeField',
+ }
+
+ def __getitem__(self, key):
+ key = key.lower()
+ try:
+ return self.base_data_types_reverse[key]
+ except KeyError:
+ size = get_field_size(key)
+ if size is not None:
+ return ('CharField', {'max_length': size})
+ raise KeyError
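+ # Illustrative example (assumption): d['varchar(30)'] returns
+ # ('CharField', {'max_length': 30}) and d['integer'] returns
+ # 'IntegerField'.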
+
+class DatabaseIntrospection(BaseDatabaseIntrospection):
+ data_types_reverse = FlexibleFieldLookupDict()
+
+ def get_table_list(self, cursor):
+ "Returns a list of table names in the current database."
+ # Skip the sqlite_sequence system table used for autoincrement key
+ # generation.
+ cursor.execute("""
+ SELECT name FROM sqlite_master
+ WHERE type='table' AND NOT name='sqlite_sequence'
+ ORDER BY name""")
+ return [row[0] for row in cursor.fetchall()]
+
+ def get_table_description(self, cursor, table_name):
+ "Returns a description of the table, with the DB-API cursor.description interface."
+ return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
+ info['null_ok']) for info in self._table_info(cursor, table_name)]
+
+ def get_relations(self, cursor, table_name):
+ """
+ Returns a dictionary of {field_index: (field_index_other_table, other_table)}
+ representing all relationships to the given table. Indexes are 0-based.
+ """
+
+ # Dictionary of relations to return
+ relations = {}
+
+ # Schema for this table
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+
+ # Walk through and look for references to other tables. SQLite doesn't
+ # really have enforced references, but since it echoes out the SQL used
+ # to create the table we can look for REFERENCES statements used there.
+ for field_index, field_desc in enumerate(results.split(',')):
+ field_desc = field_desc.strip()
+ if field_desc.startswith("UNIQUE"):
+ continue
+
+ m = re.search('references (.*) \(["|](.*)["|]\)', field_desc, re.I)
+ if not m:
+ continue
+
+ table, column = [s.strip('"') for s in m.groups()]
+
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
+ result = cursor.fetchall()[0]
+ other_table_results = result[0].strip()
+ li, ri = other_table_results.index('('), other_table_results.rindex(')')
+ other_table_results = other_table_results[li+1:ri]
+
+ for other_index, other_desc in enumerate(other_table_results.split(',')):
+ other_desc = other_desc.strip()
+ if other_desc.startswith('UNIQUE'):
+ continue
+
+ name = other_desc.split(' ', 1)[0].strip('"')
+ if name == column:
+ relations[field_index] = (other_index, table)
+ break
+
+ return relations
+
+ def get_key_columns(self, cursor, table_name):
+ """
+ Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
+ key columns in given table.
+ """
+ key_columns = []
+
+ # Schema for this table
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+
+ # Walk through and look for references to other tables. SQLite doesn't
+ # really have enforced references, but since it echoes out the SQL used
+ # to create the table we can look for REFERENCES statements used there.
+ for field_index, field_desc in enumerate(results.split(',')):
+ field_desc = field_desc.strip()
+ if field_desc.startswith("UNIQUE"):
+ continue
+
+ m = re.search('"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
+ if not m:
+ continue
+
+ # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
+ key_columns.append(tuple([s.strip('"') for s in m.groups()]))
+
+ return key_columns
+
+ def get_indexes(self, cursor, table_name):
+ indexes = {}
+ for info in self._table_info(cursor, table_name):
+ if info['pk'] != 0:
+ indexes[info['name']] = {'primary_key': True,
+ 'unique': False}
+ cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
+ # seq, name, unique
+ for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
+ cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
+ info = cursor.fetchall()
+ # Skip indexes across multiple fields
+ if len(info) != 1:
+ continue
+ name = info[0][2] # seqno, cid, name
+ indexes[name] = {'primary_key': False,
+ 'unique': unique}
+ return indexes
+
+ def get_primary_key_column(self, cursor, table_name):
+ """
+ Get the column name of the primary key for the given table.
+ """
+ # Don't use PRAGMA because that causes issues with some transactions
+ cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
+ results = cursor.fetchone()[0].strip()
+ results = results[results.index('(')+1:results.rindex(')')]
+ for field_desc in results.split(','):
+ field_desc = field_desc.strip()
+ m = re.search('"(.*)".*PRIMARY KEY$', field_desc)
+ if m:
+ return m.groups()[0]
+ return None
+
+ def _table_info(self, cursor, name):
+ cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
+ # cid, name, type, notnull, dflt_value, pk
+ return [{'name': field[1],
+ 'type': field[2],
+ 'size': get_field_size(field[2]),
+ 'null_ok': not field[3],
+ 'pk': field[5] # undocumented
+ } for field in cursor.fetchall()]
diff --git a/lib/python2.7/site-packages/django/db/backends/util.py b/lib/python2.7/site-packages/django/db/backends/util.py
new file mode 100644
index 0000000..2820007
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/backends/util.py
@@ -0,0 +1,179 @@
+from __future__ import unicode_literals
+
+import datetime
+import decimal
+import hashlib
+import logging
+from time import time
+
+from django.conf import settings
+from django.utils.encoding import force_bytes
+from django.utils.timezone import utc
+
+
+logger = logging.getLogger('django.db.backends')
+
+
+class CursorWrapper(object):
+ def __init__(self, cursor, db):
+ self.cursor = cursor
+ self.db = db
+
+ WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
+
+ def __getattr__(self, attr):
+ cursor_attr = getattr(self.cursor, attr)
+ if attr in CursorWrapper.WRAP_ERROR_ATTRS:
+ return self.db.wrap_database_errors(cursor_attr)
+ else:
+ return cursor_attr
+
+ def __iter__(self):
+ return iter(self.cursor)
+
+ # The following methods cannot be implemented in __getattr__, because the
+ # code must run when the method is invoked, not just when it is accessed.
+
+ def callproc(self, procname, params=None):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ if params is None:
+ return self.cursor.callproc(procname)
+ else:
+ return self.cursor.callproc(procname, params)
+
+ def execute(self, sql, params=None):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ if params is None:
+ return self.cursor.execute(sql)
+ else:
+ return self.cursor.execute(sql, params)
+
+ def executemany(self, sql, param_list):
+ self.db.validate_no_broken_transaction()
+ self.db.set_dirty()
+ with self.db.wrap_database_errors:
+ return self.cursor.executemany(sql, param_list)
+
+
+class CursorDebugWrapper(CursorWrapper):
+
+ # XXX callproc isn't instrumented at this time.
+
+ def execute(self, sql, params=None):
+ start = time()
+ try:
+ return super(CursorDebugWrapper, self).execute(sql, params)
+ finally:
+ stop = time()
+ duration = stop - start
+ sql = self.db.ops.last_executed_query(self.cursor, sql, params)
+ self.db.queries.append({
+ 'sql': sql,
+ 'time': "%.3f" % duration,
+ })
+ logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
+ extra={'duration': duration, 'sql': sql, 'params': params}
+ )
+
+ def executemany(self, sql, param_list):
+ start = time()
+ try:
+ return super(CursorDebugWrapper, self).executemany(sql, param_list)
+ finally:
+ stop = time()
+ duration = stop - start
+ try:
+ times = len(param_list)
+ except TypeError: # param_list could be an iterator
+ times = '?'
+ self.db.queries.append({
+ 'sql': '%s times: %s' % (times, sql),
+ 'time': "%.3f" % duration,
+ })
+ logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
+ extra={'duration': duration, 'sql': sql, 'params': param_list}
+ )
+
+
+###############################################
+# Converters from database (string) to Python #
+###############################################
+
+def typecast_date(s):
+ return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
+
+def typecast_time(s): # does NOT store time zone information
+ if not s: return None
+ hour, minutes, seconds = s.split(':')
+ if '.' in seconds: # check whether seconds have a fractional part
+ seconds, microseconds = seconds.split('.')
+ else:
+ microseconds = '0'
+ return datetime.time(int(hour), int(minutes), int(seconds), int(float('.'+microseconds) * 1000000))
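+# e.g. (illustrative) typecast_time('15:48:00.5') returns
+# datetime.time(15, 48, 0, 500000).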
+
+def typecast_timestamp(s): # does NOT store time zone information
+ # "2005-07-29 15:48:00.590358-05"
+ # "2005-07-29 09:56:00-05"
+ if not s: return None
+ if ' ' not in s: return typecast_date(s)
+ d, t = s.split()
+ # Extract timezone information, if it exists. Currently we just throw
+ # it away, but in the future we may make use of it.
+ if '-' in t:
+ t, tz = t.split('-', 1)
+ tz = '-' + tz
+ elif '+' in t:
+ t, tz = t.split('+', 1)
+ tz = '+' + tz
+ else:
+ tz = ''
+ dates = d.split('-')
+ times = t.split(':')
+ seconds = times[2]
+ if '.' in seconds: # check whether seconds have a fractional part
+ seconds, microseconds = seconds.split('.')
+ else:
+ microseconds = '0'
+ tzinfo = utc if settings.USE_TZ else None
+ return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
+ int(times[0]), int(times[1]), int(seconds),
+ int((microseconds + '000000')[:6]), tzinfo)
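+# Illustrative example using the sample above: with USE_TZ off,
+# typecast_timestamp('2005-07-29 15:48:00.590358-05') returns
+# datetime.datetime(2005, 7, 29, 15, 48, 0, 590358); the UTC offset is
+# discarded.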
+
+def typecast_decimal(s):
+ if s is None or s == '':
+ return None
+ return decimal.Decimal(s)
+
+###############################################
+# Converters from Python to database (string) #
+###############################################
+
+def rev_typecast_decimal(d):
+ if d is None:
+ return None
+ return str(d)
+
+def truncate_name(name, length=None, hash_len=4):
+ """Shortens a string to a repeatable mangled version with the given length.
+ """
+ if length is None or len(name) <= length:
+ return name
+
+ hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
+ return '%s%s' % (name[:length-hash_len], hsh)
+
+def format_number(value, max_digits, decimal_places):
+ """
+ Formats a number into a string with the requisite number of digits and
+ decimal places.
+ """
+ if isinstance(value, decimal.Decimal):
+ context = decimal.getcontext().copy()
+ context.prec = max_digits
+ return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
+ else:
+ return "%.*f" % (decimal_places, value)
diff --git a/lib/python2.7/site-packages/django/db/models/__init__.py b/lib/python2.7/site-packages/django/db/models/__init__.py
new file mode 100644
index 0000000..b5dd1a5
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/__init__.py
@@ -0,0 +1,33 @@
+from functools import wraps
+
+from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
+from django.db.models.loading import get_apps, get_app_paths, get_app, get_models, get_model, register_models, UnavailableApp
+from django.db.models.query import Q
+from django.db.models.expressions import F
+from django.db.models.manager import Manager
+from django.db.models.base import Model
+from django.db.models.aggregates import *
+from django.db.models.fields import *
+from django.db.models.fields.subclassing import SubfieldBase
+from django.db.models.fields.files import FileField, ImageField
+from django.db.models.fields.related import ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
+from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError
+from django.db.models import signals
+
+
+def permalink(func):
+ """
+ Decorator that calls urlresolvers.reverse() to return a URL using
+ parameters returned by the decorated function "func".
+
+ "func" should be a function that returns a tuple in one of the
+ following formats:
+ (viewname, viewargs)
+ (viewname, viewargs, viewkwargs)
+ """
+ from django.core.urlresolvers import reverse
+ @wraps(func)
+ def inner(*args, **kwargs):
+ bits = func(*args, **kwargs)
+ return reverse(bits[0], None, *bits[1:3])
+ return inner
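+# Usage sketch (illustrative; the view name and model are hypothetical):
+#   class Person(Model):
+#       @permalink
+#       def get_absolute_url(self):
+#           return ('people_detail', (), {'pk': self.pk})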
diff --git a/lib/python2.7/site-packages/django/db/models/aggregates.py b/lib/python2.7/site-packages/django/db/models/aggregates.py
new file mode 100644
index 0000000..b89db1c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/aggregates.py
@@ -0,0 +1,80 @@
+"""
+Classes to represent the definitions of aggregate functions.
+"""
+from django.db.models.constants import LOOKUP_SEP
+
+def refs_aggregate(lookup_parts, aggregates):
+ """
+ A little helper method to check if the lookup_parts contains references
+ to the given aggregates set. Because the LOOKUP_SEP is contained in the
+ default annotation names we must check each prefix of the lookup_parts
+ for match.
+ """
+ for i in range(len(lookup_parts) + 1):
+ if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates:
+ return True
+ return False
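+# e.g. (illustrative) refs_aggregate(['price', 'max', 'gt'],
+# {'price__max'}) is True, because the prefix 'price__max' names an
+# aggregate.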
+
+class Aggregate(object):
+ """
+ Default Aggregate definition.
+ """
+ def __init__(self, lookup, **extra):
+ """Instantiate a new aggregate.
+
+ * lookup is the field on which the aggregate operates.
+ * extra is a dictionary of additional data to provide for the
+ aggregate definition
+
+ Also utilizes the class variables:
+ * name, the identifier for this aggregate function.
+ """
+ self.lookup = lookup
+ self.extra = extra
+
+ def _default_alias(self):
+ return '%s__%s' % (self.lookup, self.name.lower())
+ default_alias = property(_default_alias)
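+ # e.g. (illustrative) Sum('price').default_alias == 'price__sum'.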
+
+ def add_to_query(self, query, alias, col, source, is_summary):
+ """Add the aggregate to the nominated query.
+
+ This method is used to convert the generic Aggregate definition into a
+ backend-specific definition.
+
+ * query is the backend-specific query instance to which the aggregate
+ is to be added.
+ * col is a column reference describing the subject field
+ of the aggregate. It can be an alias, or a tuple describing
+ a table and column name.
+ * source is the underlying field or aggregate definition for
+ the column reference. If the aggregate is not an ordinal or
+ computed type, this reference is used to determine the coerced
+ output type of the aggregate.
+ * is_summary is a boolean that is set True if the aggregate is a
+ summary value rather than an annotation.
+ """
+ klass = getattr(query.aggregates_module, self.name)
+ aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
+ query.aggregates[alias] = aggregate
+
+class Avg(Aggregate):
+ name = 'Avg'
+
+class Count(Aggregate):
+ name = 'Count'
+
+class Max(Aggregate):
+ name = 'Max'
+
+class Min(Aggregate):
+ name = 'Min'
+
+class StdDev(Aggregate):
+ name = 'StdDev'
+
+class Sum(Aggregate):
+ name = 'Sum'
+
+class Variance(Aggregate):
+ name = 'Variance'
diff --git a/lib/python2.7/site-packages/django/db/models/base.py b/lib/python2.7/site-packages/django/db/models/base.py
new file mode 100644
index 0000000..f6001b4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/base.py
@@ -0,0 +1,1042 @@
+from __future__ import unicode_literals
+
+import copy
+import sys
+from functools import update_wrapper
+from django.utils.six.moves import zip
+
+import django.db.models.manager # Imported to register signal handler.
+from django.conf import settings
+from django.core.exceptions import (ObjectDoesNotExist,
+ MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
+from django.db.models.fields import AutoField, FieldDoesNotExist
+from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel,
+ OneToOneField, add_lazy_relation)
+from django.db import (router, transaction, DatabaseError,
+ DEFAULT_DB_ALIAS)
+from django.db.models.query import Q
+from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
+from django.db.models.deletion import Collector
+from django.db.models.options import Options
+from django.db.models import signals
+from django.db.models.loading import register_models, get_model
+from django.utils.translation import ugettext_lazy as _
+from django.utils.functional import curry
+from django.utils.encoding import force_str, force_text
+from django.utils import six
+from django.utils.text import get_text_list, capfirst
+
+
+def subclass_exception(name, parents, module, attached_to=None):
+ """
+ Create exception subclass. Used by ModelBase below.
+
+ If 'attached_to' is supplied, the exception will be created in a way that
+ allows it to be pickled, assuming the returned exception class will be added
+ as an attribute to the 'attached_to' class.
+ """
+ class_dict = {'__module__': module}
+ if attached_to is not None:
+ def __reduce__(self):
+ # Exceptions are special - they've got state that isn't
+ # in self.__dict__. We assume it is all in self.args.
+ return (unpickle_inner_exception, (attached_to, name), self.args)
+
+ def __setstate__(self, args):
+ self.args = args
+
+ class_dict['__reduce__'] = __reduce__
+ class_dict['__setstate__'] = __setstate__
+
+ return type(name, parents, class_dict)
+
+
+class ModelBase(type):
+ """
+ Metaclass for all models.
+ """
+ def __new__(cls, name, bases, attrs):
+ super_new = super(ModelBase, cls).__new__
+
+ # six.with_metaclass() inserts an extra class called 'NewBase' in the
+ # inheritance tree: Model -> NewBase -> object. But the initialization
+ # should be executed only once for a given model class.
+
+ # attrs will never be empty for classes declared in the standard way
+ # (i.e. with the `class` keyword). This is quite robust.
+ if name == 'NewBase' and attrs == {}:
+ return super_new(cls, name, bases, attrs)
+
+ # Also ensure initialization is only performed for subclasses of Model
+ # (excluding Model class itself).
+ parents = [b for b in bases if isinstance(b, ModelBase) and
+ not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
+ if not parents:
+ return super_new(cls, name, bases, attrs)
+
+ # Create the class.
+ module = attrs.pop('__module__')
+ new_class = super_new(cls, name, bases, {'__module__': module})
+ attr_meta = attrs.pop('Meta', None)
+ abstract = getattr(attr_meta, 'abstract', False)
+ if not attr_meta:
+ meta = getattr(new_class, 'Meta', None)
+ else:
+ meta = attr_meta
+ base_meta = getattr(new_class, '_meta', None)
+
+ if getattr(meta, 'app_label', None) is None:
+ # Figure out the app_label by looking one level up.
+ # For 'django.contrib.sites.models', this would be 'sites'.
+ model_module = sys.modules[new_class.__module__]
+ kwargs = {"app_label": model_module.__name__.split('.')[-2]}
+ else:
+ kwargs = {}
+
+ new_class.add_to_class('_meta', Options(meta, **kwargs))
+ if not abstract:
+ new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'),
+ tuple(x.DoesNotExist
+ for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
+ or (ObjectDoesNotExist,),
+ module, attached_to=new_class))
+ new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'),
+ tuple(x.MultipleObjectsReturned
+ for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
+ or (MultipleObjectsReturned,),
+ module, attached_to=new_class))
+ if base_meta and not base_meta.abstract:
+ # Non-abstract child classes inherit some attributes from their
+ # non-abstract parent (unless an ABC comes before it in the
+ # method resolution order).
+ if not hasattr(meta, 'ordering'):
+ new_class._meta.ordering = base_meta.ordering
+ if not hasattr(meta, 'get_latest_by'):
+ new_class._meta.get_latest_by = base_meta.get_latest_by
+
+ is_proxy = new_class._meta.proxy
+
+ # If the model is a proxy, ensure that the base class
+ # hasn't been swapped out.
+ if is_proxy and base_meta and base_meta.swapped:
+ raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
+
+ if getattr(new_class, '_default_manager', None):
+ if not is_proxy:
+ # Multi-table inheritance doesn't inherit default manager from
+ # parents.
+ new_class._default_manager = None
+ new_class._base_manager = None
+ else:
+ # Proxy classes do inherit parent's default manager, if none is
+ # set explicitly.
+ new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
+ new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
+
+ # Bail out early if we have already created this class.
+ m = get_model(new_class._meta.app_label, name,
+ seed_cache=False, only_installed=False)
+ if m is not None:
+ return m
+
+ # Add all attributes to the class.
+ for obj_name, obj in attrs.items():
+ new_class.add_to_class(obj_name, obj)
+
+ # All the fields of any type declared on this model
+        new_fields = (new_class._meta.local_fields +
+                      new_class._meta.local_many_to_many +
+                      new_class._meta.virtual_fields)
+ field_names = set([f.name for f in new_fields])
+
+ # Basic setup for proxy models.
+ if is_proxy:
+ base = None
+ for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
+ if parent._meta.abstract:
+ if parent._meta.fields:
+ raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
+ else:
+ continue
+ if base is not None:
+ raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
+ else:
+ base = parent
+ if base is None:
+ raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
+ if (new_class._meta.local_fields or
+ new_class._meta.local_many_to_many):
+ raise FieldError("Proxy model '%s' contains model fields." % name)
+ new_class._meta.setup_proxy(base)
+ new_class._meta.concrete_model = base._meta.concrete_model
+ else:
+ new_class._meta.concrete_model = new_class
+
+ # Do the appropriate setup for any model parents.
+ o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
+ if isinstance(f, OneToOneField)])
+
+ for base in parents:
+ original_base = base
+ if not hasattr(base, '_meta'):
+ # Things without _meta aren't functional models, so they're
+ # uninteresting parents.
+ continue
+
+ parent_fields = base._meta.local_fields + base._meta.local_many_to_many
+ # Check for clashes between locally declared fields and those
+ # on the base classes (we cannot handle shadowed fields at the
+ # moment).
+ for field in parent_fields:
+ if field.name in field_names:
+ raise FieldError('Local field %r in class %r clashes '
+ 'with field of similar name from '
+ 'base class %r' %
+ (field.name, name, base.__name__))
+ if not base._meta.abstract:
+ # Concrete classes...
+ base = base._meta.concrete_model
+ if base in o2o_map:
+ field = o2o_map[base]
+ elif not is_proxy:
+ attr_name = '%s_ptr' % base._meta.model_name
+ field = OneToOneField(base, name=attr_name,
+ auto_created=True, parent_link=True)
+ new_class.add_to_class(attr_name, field)
+ else:
+ field = None
+ new_class._meta.parents[base] = field
+ else:
+                # ... and abstract ones.
+ for field in parent_fields:
+ new_class.add_to_class(field.name, copy.deepcopy(field))
+
+ # Pass any non-abstract parent classes onto child.
+ new_class._meta.parents.update(base._meta.parents)
+
+ # Inherit managers from the abstract base classes.
+ new_class.copy_managers(base._meta.abstract_managers)
+
+ # Proxy models inherit the non-abstract managers from their base,
+ # unless they have redefined any of them.
+ if is_proxy:
+ new_class.copy_managers(original_base._meta.concrete_managers)
+
+ # Inherit virtual fields (like GenericForeignKey) from the parent
+ # class
+ for field in base._meta.virtual_fields:
+ if base._meta.abstract and field.name in field_names:
+                    raise FieldError('Local field %r in class %r clashes '
+                                     'with field of similar name from '
+                                     'abstract base class %r' %
+                                     (field.name, name, base.__name__))
+ new_class.add_to_class(field.name, copy.deepcopy(field))
+
+ if abstract:
+ # Abstract base models can't be instantiated and don't appear in
+ # the list of models for an app. We do the final setup for them a
+ # little differently from normal models.
+ attr_meta.abstract = False
+ new_class.Meta = attr_meta
+ return new_class
+
+ new_class._prepare()
+ register_models(new_class._meta.app_label, new_class)
+
+        # Because of the way imports happen (recursively), this may or may
+        # not be the first time this model has tried to register with the
+        # framework. There should only be one class for each model, so we
+        # always return the registered version.
+ return get_model(new_class._meta.app_label, name,
+ seed_cache=False, only_installed=False)
+
+ def copy_managers(cls, base_managers):
+ # This is in-place sorting of an Options attribute, but that's fine.
+ base_managers.sort()
+ for _, mgr_name, manager in base_managers:
+ val = getattr(cls, mgr_name, None)
+ if not val or val is manager:
+ new_manager = manager._copy_to_model(cls)
+ cls.add_to_class(mgr_name, new_manager)
+
+ def add_to_class(cls, name, value):
+ if hasattr(value, 'contribute_to_class'):
+ value.contribute_to_class(cls, name)
+ else:
+ setattr(cls, name, value)
+
+ def _prepare(cls):
+ """
+ Creates some methods once self._meta has been populated.
+ """
+ opts = cls._meta
+ opts._prepare(cls)
+
+ if opts.order_with_respect_to:
+ cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
+ cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
+
+ # defer creating accessors on the foreign class until we are
+ # certain it has been created
+ def make_foreign_order_accessors(field, model, cls):
+ setattr(
+ field.rel.to,
+ 'get_%s_order' % cls.__name__.lower(),
+ curry(method_get_order, cls)
+ )
+ setattr(
+ field.rel.to,
+ 'set_%s_order' % cls.__name__.lower(),
+ curry(method_set_order, cls)
+ )
+ add_lazy_relation(
+ cls,
+ opts.order_with_respect_to,
+ opts.order_with_respect_to.rel.to,
+ make_foreign_order_accessors
+ )
+
+ # Give the class a docstring -- its definition.
+ if cls.__doc__ is None:
+ cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
+
+ if hasattr(cls, 'get_absolute_url'):
+ cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
+ cls.get_absolute_url)
+
+ signals.class_prepared.send(sender=cls)
+
+
+class ModelState(object):
+ """
+ A class for storing instance state
+ """
+ def __init__(self, db=None):
+ self.db = db
+ # If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
+ # Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
+ # This impacts validation only; it has no effect on the actual save.
+ self.adding = True
+
+
+class Model(six.with_metaclass(ModelBase)):
+ _deferred = False
+
+ def __init__(self, *args, **kwargs):
+ signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
+
+ # Set up the storage for instance state
+ self._state = ModelState()
+
+        # There is a rather weird disparity here: if kwargs is set, args can
+        # still override it. It should be one or the other; don't duplicate
+        # the work. The reason for the kwargs check is that the standard
+        # iterator passes values in by args, and instantiation for iteration
+        # is 33% faster.
+ args_len = len(args)
+ if args_len > len(self._meta.concrete_fields):
+ # Daft, but matches old exception sans the err msg.
+ raise IndexError("Number of args exceeds number of fields")
+
+ if not kwargs:
+ fields_iter = iter(self._meta.concrete_fields)
+            # The ordering of the zip calls matters - zip raises StopIteration
+            # when an iterator does. So if the first iterator raises it, the
+            # second is *not* consumed. We rely on this, so don't change the
+            # order without changing the logic.
+ for val, field in zip(args, fields_iter):
+ setattr(self, field.attname, val)
+ else:
+ # Slower, kwargs-ready version.
+ fields_iter = iter(self._meta.fields)
+ for val, field in zip(args, fields_iter):
+ setattr(self, field.attname, val)
+ kwargs.pop(field.name, None)
+ # Maintain compatibility with existing calls.
+ if isinstance(field.rel, ManyToOneRel):
+ kwargs.pop(field.attname, None)
+
+ # Now we're left with the unprocessed fields that *must* come from
+ # keywords, or default.
+
+ for field in fields_iter:
+ is_related_object = False
+ # This slightly odd construct is so that we can access any
+ # data-descriptor object (DeferredAttribute) without triggering its
+ # __get__ method.
+ if (field.attname not in kwargs and
+ (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
+ or field.column is None)):
+ # This field will be populated on request.
+ continue
+ if kwargs:
+ if isinstance(field.rel, ForeignObjectRel):
+ try:
+ # Assume object instance was passed in.
+ rel_obj = kwargs.pop(field.name)
+ is_related_object = True
+ except KeyError:
+ try:
+ # Object instance wasn't passed in -- must be an ID.
+ val = kwargs.pop(field.attname)
+ except KeyError:
+ val = field.get_default()
+ else:
+ # Object instance was passed in. Special case: You can
+ # pass in "None" for related objects if it's allowed.
+ if rel_obj is None and field.null:
+ val = None
+ else:
+ try:
+ val = kwargs.pop(field.attname)
+ except KeyError:
+ # This is done with an exception rather than the
+ # default argument on pop because we don't want
+ # get_default() to be evaluated, and then not used.
+ # Refs #12057.
+ val = field.get_default()
+ else:
+ val = field.get_default()
+
+ if is_related_object:
+ # If we are passed a related instance, set it using the
+ # field.name instead of field.attname (e.g. "user" instead of
+ # "user_id") so that the object gets properly cached (and type
+ # checked) by the RelatedObjectDescriptor.
+ setattr(self, field.name, rel_obj)
+ else:
+ setattr(self, field.attname, val)
+
+ if kwargs:
+ for prop in list(kwargs):
+ try:
+ if isinstance(getattr(self.__class__, prop), property):
+ setattr(self, prop, kwargs.pop(prop))
+ except AttributeError:
+ pass
+ if kwargs:
+ raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
+ super(Model, self).__init__()
+ signals.post_init.send(sender=self.__class__, instance=self)
+
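+    # Construction sketch for the two paths above (hypothetical ``Article``
+    # model with fields ``id`` and ``title``; not part of the original file):
+    #
+    #     >>> Article(1, "Hello")        # args fast path, in field order
+    #     >>> Article(title="Hello")     # kwargs path; other fields default
+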
+ def __repr__(self):
+ try:
+ u = six.text_type(self)
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ u = '[Bad Unicode data]'
+ return force_str('<%s: %s>' % (self.__class__.__name__, u))
+
+ def __str__(self):
+ if six.PY2 and hasattr(self, '__unicode__'):
+ return force_text(self).encode('utf-8')
+ return '%s object' % self.__class__.__name__
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash(self._get_pk_val())
+
+ def __reduce__(self):
+ """
+ Provides pickling support. Normally, this just dispatches to Python's
+ standard handling. However, for models with deferred field loading, we
+ need to do things manually, as they're dynamically created classes and
+ only module-level classes can be pickled by the default path.
+ """
+ data = self.__dict__
+ if not self._deferred:
+ class_id = self._meta.app_label, self._meta.object_name
+ return model_unpickle, (class_id, [], simple_class_factory), data
+ defers = []
+ for field in self._meta.fields:
+ if isinstance(self.__class__.__dict__.get(field.attname),
+ DeferredAttribute):
+ defers.append(field.attname)
+ model = self._meta.proxy_for_model
+ class_id = model._meta.app_label, model._meta.object_name
+ return (model_unpickle, (class_id, defers, deferred_class_factory), data)
+
+ def _get_pk_val(self, meta=None):
+ if not meta:
+ meta = self._meta
+ return getattr(self, meta.pk.attname)
+
+ def _set_pk_val(self, value):
+ return setattr(self, self._meta.pk.attname, value)
+
+ pk = property(_get_pk_val, _set_pk_val)
+
+ def serializable_value(self, field_name):
+ """
+ Returns the value of the field name for this instance. If the field is
+ a foreign key, returns the id value, instead of the object. If there's
+ no Field object with this name on the model, the model attribute's
+ value is returned directly.
+
+ Used to serialize a field's value (in the serializer, or form output,
+ for example). Normally, you would just access the attribute directly
+ and not use this method.
+ """
+ try:
+ field = self._meta.get_field_by_name(field_name)[0]
+ except FieldDoesNotExist:
+ return getattr(self, field_name)
+ return getattr(self, field.attname)
+
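+    # Sketch: for a ForeignKey named ``author`` on a hypothetical ``article``
+    # instance, this returns the raw id rather than the related object:
+    #
+    #     >>> article.serializable_value('author')
+    #     42
+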
+ def save(self, force_insert=False, force_update=False, using=None,
+ update_fields=None):
+ """
+ Saves the current instance. Override this in a subclass if you want to
+ control the saving process.
+
+ The 'force_insert' and 'force_update' parameters can be used to insist
+ that the "save" must be an SQL insert or update (or equivalent for
+ non-SQL backends), respectively. Normally, they should not be set.
+ """
+ using = using or router.db_for_write(self.__class__, instance=self)
+ if force_insert and (force_update or update_fields):
+ raise ValueError("Cannot force both insert and updating in model saving.")
+
+ if update_fields is not None:
+ # If update_fields is empty, skip the save. We do also check for
+ # no-op saves later on for inheritance cases. This bailout is
+ # still needed for skipping signal sending.
+ if len(update_fields) == 0:
+ return
+
+ update_fields = frozenset(update_fields)
+ field_names = set()
+
+ for field in self._meta.fields:
+ if not field.primary_key:
+ field_names.add(field.name)
+
+ if field.name != field.attname:
+ field_names.add(field.attname)
+
+ non_model_fields = update_fields.difference(field_names)
+
+ if non_model_fields:
+ raise ValueError("The following fields do not exist in this "
+ "model or are m2m fields: %s"
+ % ', '.join(non_model_fields))
+
+        # If saving to the same database, and this model is deferred, then
+        # automatically do an "update_fields" save on the loaded fields.
+ elif not force_insert and self._deferred and using == self._state.db:
+ field_names = set()
+ for field in self._meta.concrete_fields:
+ if not field.primary_key and not hasattr(field, 'through'):
+ field_names.add(field.attname)
+ deferred_fields = [
+ f.attname for f in self._meta.fields
+ if f.attname not in self.__dict__
+ and isinstance(self.__class__.__dict__[f.attname],
+ DeferredAttribute)]
+
+ loaded_fields = field_names.difference(deferred_fields)
+ if loaded_fields:
+ update_fields = frozenset(loaded_fields)
+
+ self.save_base(using=using, force_insert=force_insert,
+ force_update=force_update, update_fields=update_fields)
+ save.alters_data = True
+
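+    # ``update_fields`` usage sketch (hypothetical saved ``article``): only
+    # the named columns are written back, and an empty iterable skips the
+    # save entirely, as described above:
+    #
+    #     >>> article.title = "New title"
+    #     >>> article.save(update_fields=["title"])
+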
+ def save_base(self, raw=False, force_insert=False,
+ force_update=False, using=None, update_fields=None):
+ """
+ Handles the parts of saving which should be done only once per save,
+ yet need to be done in raw saves, too. This includes some sanity
+ checks and signal sending.
+
+        The 'raw' argument tells save_base not to save any parent
+        models and not to make any changes to the values before saving.
+        This is used by fixture loading.
+ """
+ using = using or router.db_for_write(self.__class__, instance=self)
+ assert not (force_insert and (force_update or update_fields))
+ assert update_fields is None or len(update_fields) > 0
+ cls = origin = self.__class__
+ # Skip proxies, but keep the origin as the proxy model.
+ if cls._meta.proxy:
+ cls = cls._meta.concrete_model
+ meta = cls._meta
+ if not meta.auto_created:
+ signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
+ update_fields=update_fields)
+ with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
+ if not raw:
+ self._save_parents(cls, using, update_fields)
+ updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
+ # Store the database on which the object was saved
+ self._state.db = using
+ # Once saved, this is no longer a to-be-added instance.
+ self._state.adding = False
+
+ # Signal that the save is complete
+ if not meta.auto_created:
+ signals.post_save.send(sender=origin, instance=self, created=(not updated),
+ update_fields=update_fields, raw=raw, using=using)
+
+ save_base.alters_data = True
+
+ def _save_parents(self, cls, using, update_fields):
+ """
+ Saves all the parents of cls using values from self.
+ """
+ meta = cls._meta
+ for parent, field in meta.parents.items():
+ # Make sure the link fields are synced between parent and self.
+ if (field and getattr(self, parent._meta.pk.attname) is None
+ and getattr(self, field.attname) is not None):
+ setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
+ self._save_parents(cls=parent, using=using, update_fields=update_fields)
+ self._save_table(cls=parent, using=using, update_fields=update_fields)
+ # Set the parent's PK value to self.
+ if field:
+ setattr(self, field.attname, self._get_pk_val(parent._meta))
+                # Since we didn't have an instance of the parent handy, we
+                # set attname directly above, bypassing the descriptor.
+                # Invalidate the related object cache, in case it has been
+                # accidentally populated. A fresh instance will be rebuilt
+                # from the database if necessary.
+ cache_name = field.get_cache_name()
+ if hasattr(self, cache_name):
+ delattr(self, cache_name)
+
+ def _save_table(self, raw=False, cls=None, force_insert=False,
+ force_update=False, using=None, update_fields=None):
+ """
+ Does the heavy-lifting involved in saving. Updates or inserts the data
+ for a single table.
+ """
+ meta = cls._meta
+ non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
+
+ if update_fields:
+ non_pks = [f for f in non_pks
+ if f.name in update_fields or f.attname in update_fields]
+
+ pk_val = self._get_pk_val(meta)
+ pk_set = pk_val is not None
+ if not pk_set and (force_update or update_fields):
+ raise ValueError("Cannot force an update in save() with no primary key.")
+ updated = False
+ # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
+ if pk_set and not force_insert:
+ base_qs = cls._base_manager.using(using)
+ values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
+ for f in non_pks]
+ forced_update = update_fields or force_update
+ updated = self._do_update(base_qs, using, pk_val, values, update_fields,
+ forced_update)
+ if force_update and not updated:
+ raise DatabaseError("Forced update did not affect any rows.")
+ if update_fields and not updated:
+ raise DatabaseError("Save with update_fields did not affect any rows.")
+ if not updated:
+ if meta.order_with_respect_to:
+                # If this is a model with an order_with_respect_to,
+                # autopopulate the _order field.
+ field = meta.order_with_respect_to
+ order_value = cls._base_manager.using(using).filter(
+ **{field.name: getattr(self, field.attname)}).count()
+ self._order = order_value
+
+ fields = meta.local_concrete_fields
+ if not pk_set:
+ fields = [f for f in fields if not isinstance(f, AutoField)]
+
+ update_pk = bool(meta.has_auto_field and not pk_set)
+ result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
+ if update_pk:
+ setattr(self, meta.pk.attname, result)
+ return updated
+
+ def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
+ """
+        Tries to update the model. Returns True if the model was updated (in
+        the sense that an update query was done and a matching row was found
+        in the DB).
+ """
+ filtered = base_qs.filter(pk=pk_val)
+ if not values:
+            # We can end up here when saving a model in an inheritance chain
+            # where update_fields doesn't target any field in the current
+            # model. In that case we just say the update succeeded. Another
+            # case that ends up here is a model with just a PK - in that
+            # case check that the PK still exists.
+ return update_fields is not None or filtered.exists()
+ if self._meta.select_on_save and not forced_update:
+ if filtered.exists():
+ filtered._update(values)
+ return True
+ else:
+ return False
+ return filtered._update(values) > 0
+
+ def _do_insert(self, manager, using, fields, update_pk, raw):
+ """
+ Do an INSERT. If update_pk is defined then this method should return
+ the new pk for the model.
+ """
+ return manager._insert([self], fields=fields, return_id=update_pk,
+ using=using, raw=raw)
+
+ def delete(self, using=None):
+ using = using or router.db_for_write(self.__class__, instance=self)
+ assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
+
+ collector = Collector(using=using)
+ collector.collect([self])
+ collector.delete()
+
+ delete.alters_data = True
+
+ def _get_FIELD_display(self, field):
+ value = getattr(self, field.attname)
+ return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
+
+ def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
+ if not self.pk:
+ raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
+ op = 'gt' if is_next else 'lt'
+ order = '' if is_next else '-'
+ param = force_text(getattr(self, field.attname))
+ q = Q(**{'%s__%s' % (field.name, op): param})
+ q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
+ qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
+ try:
+ return qs[0]
+ except IndexError:
+ raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
+
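+    # This backs the generated ``get_next_by_FOO()``/``get_previous_by_FOO()``
+    # methods that date fields contribute, e.g. (hypothetical ``pub_date``):
+    #
+    #     >>> article.get_next_by_pub_date()
+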
+ def _get_next_or_previous_in_order(self, is_next):
+ cachename = "__%s_order_cache" % is_next
+ if not hasattr(self, cachename):
+ op = 'gt' if is_next else 'lt'
+ order = '_order' if is_next else '-_order'
+ order_field = self._meta.order_with_respect_to
+ obj = self._default_manager.filter(**{
+ order_field.name: getattr(self, order_field.attname)
+ }).filter(**{
+ '_order__%s' % op: self._default_manager.values('_order').filter(**{
+ self._meta.pk.name: self.pk
+ })
+ }).order_by(order)[:1].get()
+ setattr(self, cachename, obj)
+ return getattr(self, cachename)
+
+ def prepare_database_save(self, unused):
+ if self.pk is None:
+ raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
+ return self.pk
+
+ def clean(self):
+ """
+ Hook for doing any extra model-wide validation after clean() has been
+ called on every field by self.clean_fields. Any ValidationError raised
+ by this method will not be associated with a particular field; it will
+ have a special-case association with the field defined by NON_FIELD_ERRORS.
+ """
+ pass
+
+ def validate_unique(self, exclude=None):
+ """
+ Checks unique constraints on the model and raises ``ValidationError``
+ if any failed.
+ """
+ unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
+
+ errors = self._perform_unique_checks(unique_checks)
+ date_errors = self._perform_date_checks(date_checks)
+
+ for k, v in date_errors.items():
+ errors.setdefault(k, []).extend(v)
+
+ if errors:
+ raise ValidationError(errors)
+
+ def _get_unique_checks(self, exclude=None):
+ """
+ Gather a list of checks to perform. Since validate_unique could be
+ called from a ModelForm, some fields may have been excluded; we can't
+ perform a unique check on a model that is missing fields involved
+ in that check.
+ Fields that did not validate should also be excluded, but they need
+ to be passed in via the exclude argument.
+ """
+ if exclude is None:
+ exclude = []
+ unique_checks = []
+
+ unique_togethers = [(self.__class__, self._meta.unique_together)]
+ for parent_class in self._meta.parents.keys():
+ if parent_class._meta.unique_together:
+ unique_togethers.append((parent_class, parent_class._meta.unique_together))
+
+ for model_class, unique_together in unique_togethers:
+ for check in unique_together:
+ for name in check:
+ # If this is an excluded field, don't add this check.
+ if name in exclude:
+ break
+ else:
+ unique_checks.append((model_class, tuple(check)))
+
+ # These are checks for the unique_for_<date/year/month>.
+ date_checks = []
+
+ # Gather a list of checks for fields declared as unique and add them to
+ # the list of checks.
+
+ fields_with_class = [(self.__class__, self._meta.local_fields)]
+ for parent_class in self._meta.parents.keys():
+ fields_with_class.append((parent_class, parent_class._meta.local_fields))
+
+ for model_class, fields in fields_with_class:
+ for f in fields:
+ name = f.name
+ if name in exclude:
+ continue
+ if f.unique:
+ unique_checks.append((model_class, (name,)))
+ if f.unique_for_date and f.unique_for_date not in exclude:
+ date_checks.append((model_class, 'date', name, f.unique_for_date))
+ if f.unique_for_year and f.unique_for_year not in exclude:
+ date_checks.append((model_class, 'year', name, f.unique_for_year))
+ if f.unique_for_month and f.unique_for_month not in exclude:
+ date_checks.append((model_class, 'month', name, f.unique_for_month))
+ return unique_checks, date_checks
+
+ def _perform_unique_checks(self, unique_checks):
+ errors = {}
+
+ for model_class, unique_check in unique_checks:
+            # Try to look up an existing object with the same values as this
+            # object's values for all the fields in the unique check.
+
+ lookup_kwargs = {}
+ for field_name in unique_check:
+ f = self._meta.get_field(field_name)
+ lookup_value = getattr(self, f.attname)
+ if lookup_value is None:
+ # no value, skip the lookup
+ continue
+ if f.primary_key and not self._state.adding:
+ # no need to check for unique primary key when editing
+ continue
+ lookup_kwargs[str(field_name)] = lookup_value
+
+ # some fields were skipped, no reason to do the check
+ if len(unique_check) != len(lookup_kwargs):
+ continue
+
+ qs = model_class._default_manager.filter(**lookup_kwargs)
+
+ # Exclude the current object from the query if we are editing an
+ # instance (as opposed to creating a new one)
+ # Note that we need to use the pk as defined by model_class, not
+ # self.pk. These can be different fields because model inheritance
+            # allows a single model to have effectively multiple primary keys.
+ # Refs #17615.
+ model_class_pk = self._get_pk_val(model_class._meta)
+ if not self._state.adding and model_class_pk is not None:
+ qs = qs.exclude(pk=model_class_pk)
+ if qs.exists():
+ if len(unique_check) == 1:
+ key = unique_check[0]
+ else:
+ key = NON_FIELD_ERRORS
+ errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
+
+ return errors
+
+ def _perform_date_checks(self, date_checks):
+ errors = {}
+ for model_class, lookup_type, field, unique_for in date_checks:
+ lookup_kwargs = {}
+            # There's a ticket to add a date lookup; we can remove this
+            # special case if that makes its way in.
+ date = getattr(self, unique_for)
+ if date is None:
+ continue
+ if lookup_type == 'date':
+ lookup_kwargs['%s__day' % unique_for] = date.day
+ lookup_kwargs['%s__month' % unique_for] = date.month
+ lookup_kwargs['%s__year' % unique_for] = date.year
+ else:
+ lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
+ lookup_kwargs[field] = getattr(self, field)
+
+ qs = model_class._default_manager.filter(**lookup_kwargs)
+ # Exclude the current object from the query if we are editing an
+ # instance (as opposed to creating a new one)
+ if not self._state.adding and self.pk is not None:
+ qs = qs.exclude(pk=self.pk)
+
+ if qs.exists():
+ errors.setdefault(field, []).append(
+ self.date_error_message(lookup_type, field, unique_for)
+ )
+ return errors
+
+ def date_error_message(self, lookup_type, field, unique_for):
+ opts = self._meta
+ return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
+ 'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
+ 'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
+ 'lookup': lookup_type,
+ }
+
+ def unique_error_message(self, model_class, unique_check):
+ opts = model_class._meta
+ model_name = capfirst(opts.verbose_name)
+
+ # A unique field
+ if len(unique_check) == 1:
+ field_name = unique_check[0]
+ field = opts.get_field(field_name)
+ field_label = capfirst(field.verbose_name)
+ # Insert the error into the error dict, very sneaky
+ return field.error_messages['unique'] % {
+ 'model_name': six.text_type(model_name),
+ 'field_label': six.text_type(field_label)
+ }
+ # unique_together
+ else:
+ field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
+ field_labels = get_text_list(field_labels, _('and'))
+ return _("%(model_name)s with this %(field_label)s already exists.") % {
+ 'model_name': six.text_type(model_name),
+ 'field_label': six.text_type(field_labels)
+ }
+
+ def full_clean(self, exclude=None, validate_unique=True):
+ """
+ Calls clean_fields, clean, and validate_unique, on the model,
+ and raises a ``ValidationError`` for any errors that occurred.
+ """
+ errors = {}
+ if exclude is None:
+ exclude = []
+
+ try:
+ self.clean_fields(exclude=exclude)
+ except ValidationError as e:
+ errors = e.update_error_dict(errors)
+
+ # Form.clean() is run even if other validation fails, so do the
+ # same with Model.clean() for consistency.
+ try:
+ self.clean()
+ except ValidationError as e:
+ errors = e.update_error_dict(errors)
+
+ # Run unique checks, but only for fields that passed validation.
+ if validate_unique:
+ for name in errors.keys():
+ if name != NON_FIELD_ERRORS and name not in exclude:
+ exclude.append(name)
+ try:
+ self.validate_unique(exclude=exclude)
+ except ValidationError as e:
+ errors = e.update_error_dict(errors)
+
+ if errors:
+ raise ValidationError(errors)
+
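+    # Typical validation flow (sketch; ``obj`` is any model instance):
+    #
+    #     >>> try:
+    #     ...     obj.full_clean()
+    #     ... except ValidationError as e:
+    #     ...     print(e.message_dict)
+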
+ def clean_fields(self, exclude=None):
+ """
+ Cleans all fields and raises a ValidationError containing message_dict
+ of all validation errors if any occur.
+ """
+ if exclude is None:
+ exclude = []
+
+ errors = {}
+ for f in self._meta.fields:
+ if f.name in exclude:
+ continue
+ # Skip validation for empty fields with blank=True. The developer
+ # is responsible for making sure they have a valid value.
+ raw_value = getattr(self, f.attname)
+ if f.blank and raw_value in f.empty_values:
+ continue
+ try:
+ setattr(self, f.attname, f.clean(raw_value, self))
+ except ValidationError as e:
+ errors[f.name] = e.error_list
+
+ if errors:
+ raise ValidationError(errors)
+
+
+############################################
+# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
+############################################
+
+# ORDERING METHODS #########################
+
+def method_set_order(ordered_obj, self, id_list, using=None):
+ if using is None:
+ using = DEFAULT_DB_ALIAS
+ rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
+ order_name = ordered_obj._meta.order_with_respect_to.name
+    # FIXME: It would be nice if there were an "update many" version of
+    # update for situations like this.
+ with transaction.commit_on_success_unless_managed(using=using):
+ for i, j in enumerate(id_list):
+ ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
+
+
+def method_get_order(ordered_obj, self):
+ rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
+ order_name = ordered_obj._meta.order_with_respect_to.name
+ pk_name = ordered_obj._meta.pk.name
+ return [r[pk_name] for r in
+ ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
+
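+# These back the generated ``get_FOO_order()``/``set_FOO_order()`` accessors
+# on the related model. Sketch, for a hypothetical ``Answer`` model declaring
+# ``order_with_respect_to = 'question'``:
+#
+#     >>> question.get_answer_order()
+#     [1, 2, 3]
+#     >>> question.set_answer_order([3, 1, 2])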
+
+##############################################
+# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
+##############################################
+
+def get_absolute_url(opts, func, self, *args, **kwargs):
+ return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.model_name), func)(self, *args, **kwargs)
+
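+# The override hook consulted above is the documented ABSOLUTE_URL_OVERRIDES
+# setting, e.g.:
+#
+#     ABSOLUTE_URL_OVERRIDES = {
+#         'blogs.weblog': lambda o: '/blogs/%s/' % o.slug,
+#     }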
+
+########
+# MISC #
+########
+
+class Empty(object):
+ pass
+
+def simple_class_factory(model, attrs):
+ """
+ Needed for dynamic classes.
+ """
+ return model
+
+def model_unpickle(model_id, attrs, factory):
+ """
+ Used to unpickle Model subclasses with deferred fields.
+ """
+ if isinstance(model_id, tuple):
+ model = get_model(*model_id)
+ else:
+ # Backwards compat - the model was cached directly in earlier versions.
+ model = model_id
+ cls = factory(model, attrs)
+ return cls.__new__(cls)
+model_unpickle.__safe_for_unpickle__ = True
+
+
+def unpickle_inner_exception(klass, exception_name):
+ # Get the exception class from the class it is attached to:
+ exception = getattr(klass, exception_name)
+ return exception.__new__(exception)
diff --git a/lib/python2.7/site-packages/django/db/models/constants.py b/lib/python2.7/site-packages/django/db/models/constants.py
new file mode 100644
index 0000000..a7e6c25
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/constants.py
@@ -0,0 +1,6 @@
+"""
+Constants used across the ORM in general.
+"""
+
+# Separator used to split filter strings apart.
+LOOKUP_SEP = '__'
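+
+# For example, the lookup string 'author__name__icontains' splits on
+# LOOKUP_SEP into ['author', 'name', 'icontains'].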
diff --git a/lib/python2.7/site-packages/django/db/models/deletion.py b/lib/python2.7/site-packages/django/db/models/deletion.py
new file mode 100644
index 0000000..e0bfb9d
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/deletion.py
@@ -0,0 +1,292 @@
+from operator import attrgetter
+
+from django.db import connections, transaction, IntegrityError
+from django.db.models import signals, sql
+from django.utils.datastructures import SortedDict
+from django.utils import six
+
+
+class ProtectedError(IntegrityError):
+ def __init__(self, msg, protected_objects):
+ self.protected_objects = protected_objects
+ super(ProtectedError, self).__init__(msg, protected_objects)
+
+
+def CASCADE(collector, field, sub_objs, using):
+ collector.collect(sub_objs, source=field.rel.to,
+ source_attr=field.name, nullable=field.null)
+ if field.null and not connections[using].features.can_defer_constraint_checks:
+ collector.add_field_update(field, None, sub_objs)
+
+
+def PROTECT(collector, field, sub_objs, using):
+ raise ProtectedError("Cannot delete some instances of model '%s' because "
+ "they are referenced through a protected foreign key: '%s.%s'" % (
+ field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name
+ ),
+ sub_objs
+ )
+
+
+def SET(value):
+ if callable(value):
+ def set_on_delete(collector, field, sub_objs, using):
+ collector.add_field_update(field, value(), sub_objs)
+ else:
+ def set_on_delete(collector, field, sub_objs, using):
+ collector.add_field_update(field, value, sub_objs)
+ return set_on_delete
+
+
+SET_NULL = SET(None)
+
+
+def SET_DEFAULT(collector, field, sub_objs, using):
+ collector.add_field_update(field, field.get_default(), sub_objs)
+
+
+def DO_NOTHING(collector, field, sub_objs, using):
+ pass
+
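+# Usage sketch: these handlers are what a ForeignKey's ``on_delete`` argument
+# accepts (``Author`` and ``get_sentinel_user`` are hypothetical):
+#
+#     models.ForeignKey(Author, null=True, on_delete=SET_NULL)
+#     models.ForeignKey(Author, on_delete=SET(get_sentinel_user))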
+
+class Collector(object):
+ def __init__(self, using):
+ self.using = using
+ # Initially, {model: set([instances])}, later values become lists.
+ self.data = {}
+ self.field_updates = {} # {model: {(field, value): set([instances])}}
+ # fast_deletes is a list of queryset-likes that can be deleted without
+ # fetching the objects into memory.
+ self.fast_deletes = []
+
+ # Tracks deletion-order dependency for databases without transactions
+ # or ability to defer constraint checks. Only concrete model classes
+ # should be included, as the dependencies exist only between actual
+ # database tables; proxy models are represented here by their concrete
+ # parent.
+ self.dependencies = {} # {model: set([models])}
+
+ def add(self, objs, source=None, nullable=False, reverse_dependency=False):
+ """
+ Adds 'objs' to the collection of objects to be deleted. If the call is
+ the result of a cascade, 'source' should be the model that caused it,
+ and 'nullable' should be set to True if the relation can be null.
+
+ Returns a list of all objects that were not already collected.
+ """
+ if not objs:
+ return []
+ new_objs = []
+ model = objs[0].__class__
+ instances = self.data.setdefault(model, set())
+ for obj in objs:
+ if obj not in instances:
+ new_objs.append(obj)
+ instances.update(new_objs)
+ # Nullable relationships can be ignored -- they are nulled out before
+ # deleting, and therefore do not affect the order in which objects have
+ # to be deleted.
+ if source is not None and not nullable:
+ if reverse_dependency:
+ source, model = model, source
+ self.dependencies.setdefault(
+ source._meta.concrete_model, set()).add(model._meta.concrete_model)
+ return new_objs
+
+ def add_field_update(self, field, value, objs):
+ """
+        Schedules a field update. 'objs' must be a homogeneous iterable
+        collection of model instances (e.g. a QuerySet).
+ """
+ if not objs:
+ return
+ model = objs[0].__class__
+ self.field_updates.setdefault(
+ model, {}).setdefault(
+ (field, value), set()).update(objs)
+
+ def can_fast_delete(self, objs, from_field=None):
+ """
+ Determines if the objects in the given queryset-like can be
+ fast-deleted. This can be done if there are no cascades, no
+ parents and no signal listeners for the object class.
+
+        The 'from_field' tells where we are coming from - we need this to
+        determine if the objects are in fact to be deleted. It also allows
+        skipping a parent -> child -> parent chain that would prevent a fast
+        delete of the child.
+ """
+ if from_field and from_field.rel.on_delete is not CASCADE:
+ return False
+ if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')):
+ return False
+ model = objs.model
+ if (signals.pre_delete.has_listeners(model)
+ or signals.post_delete.has_listeners(model)
+ or signals.m2m_changed.has_listeners(model)):
+ return False
+ # The use of from_field comes from the need to avoid cascade back to
+ # parent when parent delete is cascading to child.
+ opts = model._meta
+ if any(link != from_field for link in opts.concrete_model._meta.parents.values()):
+ return False
+ # Foreign keys pointing to this model, both from m2m and other
+ # models.
+ for related in opts.get_all_related_objects(
+ include_hidden=True, include_proxy_eq=True):
+ if related.field.rel.on_delete is not DO_NOTHING:
+ return False
+ # GFK deletes
+ for relation in opts.many_to_many:
+ if not relation.rel.through:
+ return False
+ return True
+
+ def collect(self, objs, source=None, nullable=False, collect_related=True,
+ source_attr=None, reverse_dependency=False):
+ """
+ Adds 'objs' to the collection of objects to be deleted as well as all
+        parent instances. 'objs' must be a homogeneous iterable collection of
+ model instances (e.g. a QuerySet). If 'collect_related' is True,
+ related objects will be handled by their respective on_delete handler.
+
+        If the call is the result of a cascade, 'source' should be the model
+        that caused it, and 'nullable' should be set to True if the relation
+        can be null.
+
+ If 'reverse_dependency' is True, 'source' will be deleted before the
+ current model, rather than after. (Needed for cascading to parent
+ models, the one case in which the cascade follows the forwards
+ direction of an FK rather than the reverse direction.)
+ """
+ if self.can_fast_delete(objs):
+ self.fast_deletes.append(objs)
+ return
+ new_objs = self.add(objs, source, nullable,
+ reverse_dependency=reverse_dependency)
+ if not new_objs:
+ return
+
+ model = new_objs[0].__class__
+
+ # Recursively collect concrete model's parent models, but not their
+ # related objects. These will be found by meta.get_all_related_objects()
+ concrete_model = model._meta.concrete_model
+ for ptr in six.itervalues(concrete_model._meta.parents):
+ if ptr:
+                # FIXME: This seems to be buggy and executes a query for each
+                # parent object fetched. We have the parent data in the obj,
+                # but we don't have a nice way to turn that data into a
+                # parent object instance.
+ parent_objs = [getattr(obj, ptr.name) for obj in new_objs]
+ self.collect(parent_objs, source=model,
+ source_attr=ptr.rel.related_name,
+ collect_related=False,
+ reverse_dependency=True)
+
+ if collect_related:
+ for related in model._meta.get_all_related_objects(
+ include_hidden=True, include_proxy_eq=True):
+ field = related.field
+ if field.rel.on_delete == DO_NOTHING:
+ continue
+ sub_objs = self.related_objects(related, new_objs)
+ if self.can_fast_delete(sub_objs, from_field=field):
+ self.fast_deletes.append(sub_objs)
+ elif sub_objs:
+ field.rel.on_delete(self, field, sub_objs, self.using)
+ for field in model._meta.virtual_fields:
+ if hasattr(field, 'bulk_related_objects'):
+                # It's something like a generic foreign key.
+ sub_objs = field.bulk_related_objects(new_objs, self.using)
+ self.collect(sub_objs,
+ source=model,
+ source_attr=field.rel.related_name,
+ nullable=True)
+
+ def related_objects(self, related, objs):
+ """
+        Gets a QuerySet of objects related to ``objs`` via the relation
+        ``related``.
+        """
+ return related.model._base_manager.using(self.using).filter(
+ **{"%s__in" % related.field.name: objs}
+ )
+
+ def instances_with_model(self):
+ for model, instances in six.iteritems(self.data):
+ for obj in instances:
+ yield model, obj
+
+ def sort(self):
+ sorted_models = []
+ concrete_models = set()
+ models = list(self.data)
+ while len(sorted_models) < len(models):
+ found = False
+ for model in models:
+ if model in sorted_models:
+ continue
+ dependencies = self.dependencies.get(model._meta.concrete_model)
+ if not (dependencies and dependencies.difference(concrete_models)):
+ sorted_models.append(model)
+ concrete_models.add(model._meta.concrete_model)
+ found = True
+ if not found:
+ return
+ self.data = SortedDict([(model, self.data[model])
+ for model in sorted_models])
+
+ def delete(self):
+ # sort instance collections
+ for model, instances in self.data.items():
+ self.data[model] = sorted(instances, key=attrgetter("pk"))
+
+ # if possible, bring the models in an order suitable for databases that
+ # don't support transactions or cannot defer constraint checks until the
+ # end of a transaction.
+ self.sort()
+
+ with transaction.commit_on_success_unless_managed(using=self.using):
+ # send pre_delete signals
+ for model, obj in self.instances_with_model():
+ if not model._meta.auto_created:
+ signals.pre_delete.send(
+ sender=model, instance=obj, using=self.using
+ )
+
+ # fast deletes
+ for qs in self.fast_deletes:
+ qs._raw_delete(using=self.using)
+
+ # update fields
+ for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
+ query = sql.UpdateQuery(model)
+ for (field, value), instances in six.iteritems(instances_for_fieldvalues):
+ query.update_batch([obj.pk for obj in instances],
+ {field.name: value}, self.using)
+
+ # reverse instance collections
+ for instances in six.itervalues(self.data):
+ instances.reverse()
+
+ # delete instances
+ for model, instances in six.iteritems(self.data):
+ query = sql.DeleteQuery(model)
+ pk_list = [obj.pk for obj in instances]
+ query.delete_batch(pk_list, self.using)
+
+ if not model._meta.auto_created:
+ for obj in instances:
+ signals.post_delete.send(
+ sender=model, instance=obj, using=self.using
+ )
+
+ # update collected instances
+ for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
+ for (field, value), instances in six.iteritems(instances_for_fieldvalues):
+ for obj in instances:
+ setattr(obj, field.attname, value)
+ for model, instances in six.iteritems(self.data):
+ for instance in instances:
+ setattr(instance, model._meta.pk.attname, None)
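+
+# Collector usage sketch, mirroring Model.delete() in base.py:
+#
+#     >>> collector = Collector(using='default')
+#     >>> collector.collect([instance])
+#     >>> collector.delete()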
diff --git a/lib/python2.7/site-packages/django/db/models/expressions.py b/lib/python2.7/site-packages/django/db/models/expressions.py
new file mode 100644
index 0000000..6e0f3c4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/expressions.py
@@ -0,0 +1,186 @@
+import datetime
+
+from django.db.models.aggregates import refs_aggregate
+from django.db.models.constants import LOOKUP_SEP
+from django.utils import tree
+
+class ExpressionNode(tree.Node):
+ """
+ Base class for all query expressions.
+ """
+ # Arithmetic connectors
+ ADD = '+'
+ SUB = '-'
+ MUL = '*'
+ DIV = '/'
+ MOD = '%%' # This is a quoted % operator - it is quoted
+ # because it can be used in strings that also
+ # have parameter substitution.
+
+ # Bitwise operators - note that these are generated by .bitand()
+ # and .bitor(), the '&' and '|' are reserved for boolean operator
+ # usage.
+ BITAND = '&'
+ BITOR = '|'
+
+ def __init__(self, children=None, connector=None, negated=False):
+ if children is not None and len(children) > 1 and connector is None:
+ raise TypeError('You have to specify a connector.')
+ super(ExpressionNode, self).__init__(children, connector, negated)
+
+ def _combine(self, other, connector, reversed, node=None):
+ if isinstance(other, datetime.timedelta):
+ return DateModifierNode([self, other], connector)
+
+ if reversed:
+ obj = ExpressionNode([other], connector)
+ obj.add(node or self, connector)
+ else:
+ obj = node or ExpressionNode([self], connector)
+ obj.add(other, connector)
+ return obj
+
+ def contains_aggregate(self, existing_aggregates):
+ if self.children:
+ return any(child.contains_aggregate(existing_aggregates)
+ for child in self.children
+ if hasattr(child, 'contains_aggregate'))
+ else:
+ return refs_aggregate(self.name.split(LOOKUP_SEP),
+ existing_aggregates)
+
+ def prepare_database_save(self, unused):
+ return self
+
+ ###################
+ # VISITOR METHODS #
+ ###################
+
+ def prepare(self, evaluator, query, allow_joins):
+ return evaluator.prepare_node(self, query, allow_joins)
+
+ def evaluate(self, evaluator, qn, connection):
+ return evaluator.evaluate_node(self, qn, connection)
+
+ #############
+ # OPERATORS #
+ #############
+
+ def __add__(self, other):
+ return self._combine(other, self.ADD, False)
+
+ def __sub__(self, other):
+ return self._combine(other, self.SUB, False)
+
+ def __mul__(self, other):
+ return self._combine(other, self.MUL, False)
+
+ def __truediv__(self, other):
+ return self._combine(other, self.DIV, False)
+
+ def __div__(self, other): # Python 2 compatibility
+ return type(self).__truediv__(self, other)
+
+ def __mod__(self, other):
+ return self._combine(other, self.MOD, False)
+
+ def __and__(self, other):
+ raise NotImplementedError(
+ "Use .bitand() and .bitor() for bitwise logical operations."
+ )
+
+ def bitand(self, other):
+ return self._combine(other, self.BITAND, False)
+
+ def __or__(self, other):
+ raise NotImplementedError(
+ "Use .bitand() and .bitor() for bitwise logical operations."
+ )
+
+ def bitor(self, other):
+ return self._combine(other, self.BITOR, False)
+
+ def __radd__(self, other):
+ return self._combine(other, self.ADD, True)
+
+ def __rsub__(self, other):
+ return self._combine(other, self.SUB, True)
+
+ def __rmul__(self, other):
+ return self._combine(other, self.MUL, True)
+
+ def __rtruediv__(self, other):
+ return self._combine(other, self.DIV, True)
+
+ def __rdiv__(self, other): # Python 2 compatibility
+ return type(self).__rtruediv__(self, other)
+
+ def __rmod__(self, other):
+ return self._combine(other, self.MOD, True)
+
+ def __rand__(self, other):
+ raise NotImplementedError(
+ "Use .bitand() and .bitor() for bitwise logical operations."
+ )
+
+ def __ror__(self, other):
+ raise NotImplementedError(
+ "Use .bitand() and .bitor() for bitwise logical operations."
+ )
+
+class F(ExpressionNode):
+ """
+ An expression representing the value of the given field.
+ """
+ def __init__(self, name):
+ super(F, self).__init__(None, None, False)
+ self.name = name
+
+ def __deepcopy__(self, memodict):
+ obj = super(F, self).__deepcopy__(memodict)
+ obj.name = self.name
+ return obj
+
+ def prepare(self, evaluator, query, allow_joins):
+ return evaluator.prepare_leaf(self, query, allow_joins)
+
+ def evaluate(self, evaluator, qn, connection):
+ return evaluator.evaluate_leaf(self, qn, connection)
+
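+# F() usage sketch (documented pattern; ``Entry`` and ``n_pingbacks`` are
+# hypothetical):
+#
+#     >>> Entry.objects.update(n_pingbacks=F('n_pingbacks') + 1)
+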
+class DateModifierNode(ExpressionNode):
+ """
+ Node that implements the following syntax:
+ filter(end_date__gt=F('start_date') + datetime.timedelta(days=3, seconds=200))
+
+ which translates into:
+ POSTGRES:
+ WHERE end_date > (start_date + INTERVAL '3 days 200 seconds')
+
+ MYSQL:
+ WHERE end_date > (start_date + INTERVAL '3 0:0:200:0' DAY_MICROSECOND)
+
+ ORACLE:
+ WHERE end_date > (start_date + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
+
+ SQLITE:
+ WHERE end_date > django_format_dtdelta(start_date, "+" "3", "200", "0")
+ (A custom function is used in order to preserve six digits of fractional
+ second information on sqlite, and to format both date and datetime values.)
+
+ Note that microsecond comparisons are not well supported with MySQL, since
+ MySQL does not store microsecond information.
+
+    Only adding and subtracting timedeltas is supported; attempts to use
+    other operations raise a TypeError.
+ """
+ def __init__(self, children, connector, negated=False):
+ if len(children) != 2:
+ raise TypeError('Must specify a node and a timedelta.')
+ if not isinstance(children[1], datetime.timedelta):
+ raise TypeError('Second child must be a timedelta.')
+ if connector not in (self.ADD, self.SUB):
+ raise TypeError('Connector must be + or -, not %s' % connector)
+ super(DateModifierNode, self).__init__(children, connector, negated)
+
+ def evaluate(self, evaluator, qn, connection):
+ return evaluator.evaluate_date_modifier_node(self, qn, connection)
diff --git a/lib/python2.7/site-packages/django/db/models/fields/__init__.py b/lib/python2.7/site-packages/django/db/models/fields/__init__.py
new file mode 100644
index 0000000..c10e2b1
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/__init__.py
@@ -0,0 +1,1438 @@
+from __future__ import unicode_literals
+
+import copy
+import datetime
+import decimal
+import math
+import warnings
+from base64 import b64decode, b64encode
+from itertools import tee
+
+from django.db import connection
+from django.db.models.loading import get_model
+from django.db.models.query_utils import QueryWrapper
+from django.conf import settings
+from django import forms
+from django.core import exceptions, validators
+from django.utils.datastructures import DictWrapper
+from django.utils.dateparse import parse_date, parse_datetime, parse_time
+from django.utils.functional import curry, total_ordering
+from django.utils.itercompat import is_iterator
+from django.utils.text import capfirst
+from django.utils import timezone
+from django.utils.translation import ugettext_lazy as _
+from django.utils.encoding import smart_text, force_text, force_bytes
+from django.utils.ipv6 import clean_ipv6_address
+from django.utils import six
+
+class Empty(object):
+ pass
+
+class NOT_PROVIDED:
+ pass
+
+# The values to use for "blank" in SelectFields. Will be prepended to most
+# "choices" lists.
+BLANK_CHOICE_DASH = [("", "---------")]
+
+def _load_field(app_label, model_name, field_name):
+ return get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
+
+class FieldDoesNotExist(Exception):
+ pass
+
+# A guide to Field parameters:
+#
+# * name:      The name of the field specified in the model.
+# * attname: The attribute to use on the model object. This is the same as
+# "name", except in the case of ForeignKeys, where "_id" is
+# appended.
+# * db_column: The db_column specified in the model (or None).
+# * column: The database column for this field. This is the same as
+# "attname", except if db_column is specified.
+#
+# Code that introspects values, or does other dynamic things, should use
+# attname. For example, this gets the primary key value of object "obj":
+#
+# getattr(obj, opts.pk.attname)
+
+def _empty(of_cls):
+ new = Empty()
+ new.__class__ = of_cls
+ return new
+
+@total_ordering
+class Field(object):
+ """Base class for all field types"""
+
+ # Designates whether empty strings fundamentally are allowed at the
+ # database level.
+ empty_strings_allowed = True
+ empty_values = list(validators.EMPTY_VALUES)
+
+ # These track each time a Field instance is created. Used to retain order.
+ # The auto_creation_counter is used for fields that Django implicitly
+ # creates, creation_counter is used for all user-specified fields.
+ creation_counter = 0
+ auto_creation_counter = -1
+ default_validators = [] # Default set of validators
+ default_error_messages = {
+ 'invalid_choice': _('Value %(value)r is not a valid choice.'),
+ 'null': _('This field cannot be null.'),
+ 'blank': _('This field cannot be blank.'),
+ 'unique': _('%(model_name)s with this %(field_label)s '
+ 'already exists.'),
+ }
+
+    # Generic field type description, usually overridden by subclasses.
+ def _description(self):
+ return _('Field of type: %(field_type)s') % {
+ 'field_type': self.__class__.__name__
+ }
+ description = property(_description)
+
+ def __init__(self, verbose_name=None, name=None, primary_key=False,
+ max_length=None, unique=False, blank=False, null=False,
+ db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
+ serialize=True, unique_for_date=None, unique_for_month=None,
+ unique_for_year=None, choices=None, help_text='', db_column=None,
+ db_tablespace=None, auto_created=False, validators=[],
+ error_messages=None):
+ self.name = name
+ self.verbose_name = verbose_name
+ self.primary_key = primary_key
+ self.max_length, self._unique = max_length, unique
+ self.blank, self.null = blank, null
+ self.rel = rel
+ self.default = default
+ self.editable = editable
+ self.serialize = serialize
+ self.unique_for_date, self.unique_for_month = (unique_for_date,
+ unique_for_month)
+ self.unique_for_year = unique_for_year
+ self._choices = choices or []
+ self.help_text = help_text
+ self.db_column = db_column
+ self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
+ self.auto_created = auto_created
+
+ # Set db_index to True if the field has a relationship and doesn't
+ # explicitly set db_index.
+ self.db_index = db_index
+
+ # Adjust the appropriate creation counter, and save our local copy.
+ if auto_created:
+ self.creation_counter = Field.auto_creation_counter
+ Field.auto_creation_counter -= 1
+ else:
+ self.creation_counter = Field.creation_counter
+ Field.creation_counter += 1
+
+ self.validators = self.default_validators + validators
+
+ messages = {}
+ for c in reversed(self.__class__.__mro__):
+ messages.update(getattr(c, 'default_error_messages', {}))
+ messages.update(error_messages or {})
+ self.error_messages = messages
+
+ def __eq__(self, other):
+ # Needed for @total_ordering
+ if isinstance(other, Field):
+ return self.creation_counter == other.creation_counter
+ return NotImplemented
+
+ def __lt__(self, other):
+ # This is needed because bisect does not take a comparison function.
+ if isinstance(other, Field):
+ return self.creation_counter < other.creation_counter
+ return NotImplemented
+
+ def __hash__(self):
+ return hash(self.creation_counter)
+
+ def __deepcopy__(self, memodict):
+ # We don't have to deepcopy very much here, since most things are not
+ # intended to be altered after initial creation.
+ obj = copy.copy(self)
+ if self.rel:
+ obj.rel = copy.copy(self.rel)
+ if hasattr(self.rel, 'field') and self.rel.field is self:
+ obj.rel.field = obj
+ memodict[id(self)] = obj
+ return obj
+
+ def __copy__(self):
+ # We need to avoid hitting __reduce__, so define this
+ # slightly weird copy construct.
+ obj = Empty()
+ obj.__class__ = self.__class__
+ obj.__dict__ = self.__dict__.copy()
+ return obj
+
+ def __reduce__(self):
+ """
+ Pickling should return the model._meta.fields instance of the field,
+ not a new copy of that field. So, we use the app cache to load the
+ model and then the field back.
+ """
+ if not hasattr(self, 'model'):
+ # Fields are sometimes used without attaching them to models (for
+ # example in aggregation). In this case give back a plain field
+ # instance. The code below will create a new empty instance of
+ # class self.__class__, then update its dict with self.__dict__
+ # values - so, this is very close to normal pickle.
+ return _empty, (self.__class__,), self.__dict__
+ if self.model._deferred:
+ # Deferred model will not be found from the app cache. This could
+ # be fixed by reconstructing the deferred model on unpickle.
+ raise RuntimeError("Fields of deferred models can't be reduced")
+ return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
+ self.name)
+
+ def to_python(self, value):
+ """
+ Converts the input value into the expected Python data type, raising
+ django.core.exceptions.ValidationError if the data can't be converted.
+ Returns the converted value. Subclasses should override this.
+ """
+ return value
+
+ def run_validators(self, value):
+ if value in self.empty_values:
+ return
+
+ errors = []
+ for v in self.validators:
+ try:
+ v(value)
+ except exceptions.ValidationError as e:
+ if hasattr(e, 'code') and e.code in self.error_messages:
+ e.message = self.error_messages[e.code]
+ errors.extend(e.error_list)
+
+ if errors:
+ raise exceptions.ValidationError(errors)
+
+ def validate(self, value, model_instance):
+ """
+ Validates value and throws ValidationError. Subclasses should override
+ this to provide validation logic.
+ """
+ if not self.editable:
+ # Skip validation for non-editable fields.
+ return
+
+ if self._choices and value not in self.empty_values:
+ for option_key, option_value in self.choices:
+ if isinstance(option_value, (list, tuple)):
+ # This is an optgroup, so look inside the group for
+ # options.
+ for optgroup_key, optgroup_value in option_value:
+ if value == optgroup_key:
+ return
+ elif value == option_key:
+ return
+ raise exceptions.ValidationError(
+ self.error_messages['invalid_choice'],
+ code='invalid_choice',
+ params={'value': value},
+ )
+
+ if value is None and not self.null:
+ raise exceptions.ValidationError(self.error_messages['null'], code='null')
+
+ if not self.blank and value in self.empty_values:
+ raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
+
+ def clean(self, value, model_instance):
+ """
+ Convert the value's type and run validation. Validation errors
+ from to_python and validate are propagated. The correct value is
+ returned if no error is raised.
+ """
+ value = self.to_python(value)
+ self.validate(value, model_instance)
+ self.run_validators(value)
+ return value
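+
+ # Illustrative example of the pipeline above (not part of Django's
+ # source; IntegerField is defined later in this module):
+ #
+ #     >>> f = IntegerField()
+ #     >>> f.clean('42', None)   # to_python -> validate -> run_validators
+ #     42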
+
+ def db_type(self, connection):
+ """
+ Returns the database column data type for this field, for the provided
+ connection.
+ """
+ # The default implementation of this method looks at the
+ # backend-specific DATA_TYPES dictionary, looking up the field by its
+ # "internal type".
+ #
+ # A Field class can implement the get_internal_type() method to specify
+ # which *preexisting* Django Field class it's most similar to -- i.e.,
+ # a custom field might be represented by a TEXT column type, which is
+ # the same as the TextField Django field type, which means the custom
+ # field's get_internal_type() returns 'TextField'.
+ #
+ # But the limitation of the get_internal_type() / data_types approach
+ # is that it cannot handle database column types that aren't already
+ # mapped to one of the built-in Django field types. In this case, you
+ # can implement db_type() instead of get_internal_type() to specify
+ # exactly which wacky database column type you want to use.
+ data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
+ try:
+ return (connection.creation.data_types[self.get_internal_type()]
+ % data)
+ except KeyError:
+ return None
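+
+ # Illustrative sketch (not part of Django's source): hypothetical custom
+ # fields showing the two approaches described above -- reusing a built-in
+ # column type via get_internal_type(), or returning a backend-specific
+ # type from db_type() directly:
+ #
+ #     class HandField(Field):
+ #         def get_internal_type(self):
+ #             return 'TextField'    # borrow TextField's column type
+ #
+ #     class PointField(Field):
+ #         def db_type(self, connection):
+ #             return 'point'        # exact, backend-specific column type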
+
+ @property
+ def unique(self):
+ return self._unique or self.primary_key
+
+ def set_attributes_from_name(self, name):
+ if not self.name:
+ self.name = name
+ self.attname, self.column = self.get_attname_column()
+ if self.verbose_name is None and self.name:
+ self.verbose_name = self.name.replace('_', ' ')
+
+ def contribute_to_class(self, cls, name, virtual_only=False):
+ self.set_attributes_from_name(name)
+ self.model = cls
+ if virtual_only:
+ cls._meta.add_virtual_field(self)
+ else:
+ cls._meta.add_field(self)
+ if self.choices:
+ setattr(cls, 'get_%s_display' % self.name,
+ curry(cls._get_FIELD_display, field=self))
+
+ def get_attname(self):
+ return self.name
+
+ def get_attname_column(self):
+ attname = self.get_attname()
+ column = self.db_column or attname
+ return attname, column
+
+ def get_cache_name(self):
+ return '_%s_cache' % self.name
+
+ def get_internal_type(self):
+ return self.__class__.__name__
+
+ def pre_save(self, model_instance, add):
+ """
+ Returns field's value just before saving.
+ """
+ return getattr(model_instance, self.attname)
+
+ def get_prep_value(self, value):
+ """
+ Perform preliminary non-db specific value checks and conversions.
+ """
+ return value
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ """Returns field's value prepared for interacting with the database
+ backend.
+
+ Used by the default implementations of ``get_db_prep_save`` and
+ ``get_db_prep_lookup``.
+ """
+ if not prepared:
+ value = self.get_prep_value(value)
+ return value
+
+ def get_db_prep_save(self, value, connection):
+ """
+ Returns field's value prepared for saving into a database.
+ """
+ return self.get_db_prep_value(value, connection=connection,
+ prepared=False)
+
+ def get_prep_lookup(self, lookup_type, value):
+ """
+ Perform preliminary non-db specific lookup checks and conversions
+ """
+ if hasattr(value, 'prepare'):
+ return value.prepare()
+ if hasattr(value, '_prepare'):
+ return value._prepare()
+
+ if lookup_type in (
+ 'iexact', 'contains', 'icontains',
+ 'startswith', 'istartswith', 'endswith', 'iendswith',
+ 'month', 'day', 'week_day', 'hour', 'minute', 'second',
+ 'isnull', 'search', 'regex', 'iregex',
+ ):
+ return value
+ elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
+ return self.get_prep_value(value)
+ elif lookup_type in ('range', 'in'):
+ return [self.get_prep_value(v) for v in value]
+ elif lookup_type == 'year':
+ try:
+ return int(value)
+ except ValueError:
+ raise ValueError("The __year lookup type requires an integer "
+ "argument")
+
+ raise TypeError("Field has invalid lookup: %s" % lookup_type)
+
+ def get_db_prep_lookup(self, lookup_type, value, connection,
+ prepared=False):
+ """
+ Returns field's value prepared for database lookup.
+ """
+ if not prepared:
+ value = self.get_prep_lookup(lookup_type, value)
+ prepared = True
+ if hasattr(value, 'get_compiler'):
+ value = value.get_compiler(connection=connection)
+ if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
+ # If the value has a relabeled_clone method it means the
+ # value will be handled later on.
+ if hasattr(value, 'relabeled_clone'):
+ return value
+ if hasattr(value, 'as_sql'):
+ sql, params = value.as_sql()
+ else:
+ sql, params = value._as_sql(connection=connection)
+ return QueryWrapper(('(%s)' % sql), params)
+
+ if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
+ 'second', 'search', 'regex', 'iregex'):
+ return [value]
+ elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
+ return [self.get_db_prep_value(value, connection=connection,
+ prepared=prepared)]
+ elif lookup_type in ('range', 'in'):
+ return [self.get_db_prep_value(v, connection=connection,
+ prepared=prepared) for v in value]
+ elif lookup_type in ('contains', 'icontains'):
+ return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
+ elif lookup_type == 'iexact':
+ return [connection.ops.prep_for_iexact_query(value)]
+ elif lookup_type in ('startswith', 'istartswith'):
+ return ["%s%%" % connection.ops.prep_for_like_query(value)]
+ elif lookup_type in ('endswith', 'iendswith'):
+ return ["%%%s" % connection.ops.prep_for_like_query(value)]
+ elif lookup_type == 'isnull':
+ return []
+ elif lookup_type == 'year':
+ if isinstance(self, DateTimeField):
+ return connection.ops.year_lookup_bounds_for_datetime_field(value)
+ elif isinstance(self, DateField):
+ return connection.ops.year_lookup_bounds_for_date_field(value)
+ else:
+ return [value] # this isn't supposed to happen
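+
+ # Illustrative example (not part of Django's source; assumes a configured
+ # ``connection``). LIKE-style lookups are escaped and wrapped in
+ # wildcards, though the exact escaping is backend-defined:
+ #
+ #     >>> f = CharField(max_length=20)
+ #     >>> f.get_db_prep_lookup('contains', '50%', connection=connection)
+ #     ['%50\\%%']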
+
+ def has_default(self):
+ """
+ Returns a boolean of whether this field has a default value.
+ """
+ return self.default is not NOT_PROVIDED
+
+ def get_default(self):
+ """
+ Returns the default value for this field.
+ """
+ if self.has_default():
+ if callable(self.default):
+ return self.default()
+ return force_text(self.default, strings_only=True)
+ if (not self.empty_strings_allowed or (self.null and
+ not connection.features.interprets_empty_strings_as_nulls)):
+ return None
+ return ""
+
+ def get_validator_unique_lookup_type(self):
+ return '%s__exact' % self.name
+
+ def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
+ """Returns choices with a default blank choices included, for use
+ as SelectField choices for this field."""
+ first_choice = blank_choice if include_blank else []
+ if self.choices:
+ return first_choice + list(self.choices)
+ rel_model = self.rel.to
+ if hasattr(self.rel, 'get_related_field'):
+ lst = [(getattr(x, self.rel.get_related_field().attname),
+ smart_text(x))
+ for x in rel_model._default_manager.complex_filter(
+ self.rel.limit_choices_to)]
+ else:
+ lst = [(x._get_pk_val(), smart_text(x))
+ for x in rel_model._default_manager.complex_filter(
+ self.rel.limit_choices_to)]
+ return first_choice + lst
+
+ def get_choices_default(self):
+ return self.get_choices()
+
+ def get_flatchoices(self, include_blank=True,
+ blank_choice=BLANK_CHOICE_DASH):
+ """
+ Returns flattened choices with a default blank choice included.
+ """
+ first_choice = blank_choice if include_blank else []
+ return first_choice + list(self.flatchoices)
+
+ def _get_val_from_obj(self, obj):
+ if obj is not None:
+ return getattr(obj, self.attname)
+ else:
+ return self.get_default()
+
+ def value_to_string(self, obj):
+ """
+ Returns a string value of this field from the passed obj.
+ This is used by the serialization framework.
+ """
+ return smart_text(self._get_val_from_obj(obj))
+
+ def bind(self, fieldmapping, original, bound_field_class):
+ return bound_field_class(self, fieldmapping, original)
+
+ def _get_choices(self):
+ if is_iterator(self._choices):
+ choices, self._choices = tee(self._choices)
+ return choices
+ else:
+ return self._choices
+ choices = property(_get_choices)
+
+ def _get_flatchoices(self):
+ """Flattened version of choices tuple."""
+ flat = []
+ for choice, value in self.choices:
+ if isinstance(value, (list, tuple)):
+ flat.extend(value)
+ else:
+ flat.append((choice, value))
+ return flat
+ flatchoices = property(_get_flatchoices)
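+
+ # Example (illustrative, not part of Django's source): optgroup choices
+ # are flattened pairwise:
+ #
+ #     >>> MEDIA = (('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
+ #     ...          ('unknown', 'Unknown'))
+ #     >>> Field(choices=MEDIA).flatchoices
+ #     [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]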
+
+ def save_form_data(self, instance, data):
+ setattr(instance, self.name, data)
+
+ def formfield(self, form_class=None, choices_form_class=None, **kwargs):
+ """
+ Returns a django.forms.Field instance for this database Field.
+ """
+ defaults = {'required': not self.blank,
+ 'label': capfirst(self.verbose_name),
+ 'help_text': self.help_text}
+ if self.has_default():
+ if callable(self.default):
+ defaults['initial'] = self.default
+ defaults['show_hidden_initial'] = True
+ else:
+ defaults['initial'] = self.get_default()
+ if self.choices:
+ # Fields with choices get special treatment.
+ include_blank = (self.blank or
+ not (self.has_default() or 'initial' in kwargs))
+ defaults['choices'] = self.get_choices(include_blank=include_blank)
+ defaults['coerce'] = self.to_python
+ if self.null:
+ defaults['empty_value'] = None
+ if choices_form_class is not None:
+ form_class = choices_form_class
+ else:
+ form_class = forms.TypedChoiceField
+ # Many of the subclass-specific formfield arguments (min_value,
+ # max_value) don't apply for choice fields, so be sure to only pass
+ # the values that TypedChoiceField will understand.
+ for k in list(kwargs):
+ if k not in ('coerce', 'empty_value', 'choices', 'required',
+ 'widget', 'label', 'initial', 'help_text',
+ 'error_messages', 'show_hidden_initial'):
+ del kwargs[k]
+ defaults.update(kwargs)
+ if form_class is None:
+ form_class = forms.CharField
+ return form_class(**defaults)
+
+ def value_from_object(self, obj):
+ """
+ Returns the value of this field in the given model instance.
+ """
+ return getattr(obj, self.attname)
+
+ def __repr__(self):
+ """
+ Displays the module, class and name of the field.
+ """
+ path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
+ name = getattr(self, 'name', None)
+ if name is not None:
+ return '<%s: %s>' % (path, name)
+ return '<%s>' % path
+
+class AutoField(Field):
+ description = _("Integer")
+
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be an integer."),
+ }
+
+ def __init__(self, *args, **kwargs):
+ assert kwargs.get('primary_key', False) is True, \
+ "%ss must have primary_key=True." % self.__class__.__name__
+ kwargs['blank'] = True
+ Field.__init__(self, *args, **kwargs)
+
+ def get_internal_type(self):
+ return "AutoField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def validate(self, value, model_instance):
+ pass
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ if not prepared:
+ value = self.get_prep_value(value)
+ value = connection.ops.validate_autopk_value(value)
+ return value
+
+ def get_prep_value(self, value):
+ if value is None:
+ return None
+ return int(value)
+
+ def contribute_to_class(self, cls, name):
+ assert not cls._meta.has_auto_field, \
+ "A model can't have more than one AutoField."
+ super(AutoField, self).contribute_to_class(cls, name)
+ cls._meta.has_auto_field = True
+ cls._meta.auto_field = self
+
+ def formfield(self, **kwargs):
+ return None
+
+class BooleanField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be either True or False."),
+ }
+ description = _("Boolean (Either True or False)")
+
+ def __init__(self, *args, **kwargs):
+ kwargs['blank'] = True
+ Field.__init__(self, *args, **kwargs)
+
+ def get_internal_type(self):
+ return "BooleanField"
+
+ def to_python(self, value):
+ if value in (True, False):
+ # If value is 1 or 0, then it's equal to True or False, but we want
+ # to return a true bool for semantic reasons.
+ return bool(value)
+ if value in ('t', 'True', '1'):
+ return True
+ if value in ('f', 'False', '0'):
+ return False
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
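+
+ # Example (illustrative): string forms coming from forms or fixtures
+ # coerce to real booleans:
+ #
+ #     >>> BooleanField().to_python('1')
+ #     True
+ #     >>> BooleanField().to_python('False')
+ #     False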
+
+ def get_prep_lookup(self, lookup_type, value):
+ # Special-case handling for filters coming from a Web request (e.g. the
+ # admin interface). Only works for scalar values (not lists). If you're
+ # passing in a list, you might as well make things the right type when
+ # constructing the list.
+ if value in ('1', '0'):
+ value = bool(int(value))
+ return super(BooleanField, self).get_prep_lookup(lookup_type, value)
+
+ def get_prep_value(self, value):
+ if value is None:
+ return None
+ return bool(value)
+
+ def formfield(self, **kwargs):
+ # Unlike most fields, BooleanField figures out include_blank from
+ # self.null instead of self.blank.
+ if self.choices:
+ include_blank = (self.null or
+ not (self.has_default() or 'initial' in kwargs))
+ defaults = {'choices': self.get_choices(
+ include_blank=include_blank)}
+ else:
+ defaults = {'form_class': forms.BooleanField}
+ defaults.update(kwargs)
+ return super(BooleanField, self).formfield(**defaults)
+
+class CharField(Field):
+ description = _("String (up to %(max_length)s)")
+
+ def __init__(self, *args, **kwargs):
+ super(CharField, self).__init__(*args, **kwargs)
+ self.validators.append(validators.MaxLengthValidator(self.max_length))
+
+ def get_internal_type(self):
+ return "CharField"
+
+ def to_python(self, value):
+ if isinstance(value, six.string_types) or value is None:
+ return value
+ return smart_text(value)
+
+ def get_prep_value(self, value):
+ return self.to_python(value)
+
+ def formfield(self, **kwargs):
+ # Passing max_length to forms.CharField means that the value's length
+ # will be validated twice. This is considered acceptable since we want
+ # the value in the form field (to pass it into the widget, for example).
+ defaults = {'max_length': self.max_length}
+ defaults.update(kwargs)
+ return super(CharField, self).formfield(**defaults)
+
+# TODO: Maybe move this into contrib, because it's specialized.
+class CommaSeparatedIntegerField(CharField):
+ default_validators = [validators.validate_comma_separated_integer_list]
+ description = _("Comma-separated integers")
+
+ def formfield(self, **kwargs):
+ defaults = {
+ 'error_messages': {
+ 'invalid': _('Enter only digits separated by commas.'),
+ }
+ }
+ defaults.update(kwargs)
+ return super(CommaSeparatedIntegerField, self).formfield(**defaults)
+
+class DateField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value has an invalid date format. It must be "
+ "in YYYY-MM-DD format."),
+ 'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) "
+ "but it is an invalid date."),
+ }
+ description = _("Date (without time)")
+
+ def __init__(self, verbose_name=None, name=None, auto_now=False,
+ auto_now_add=False, **kwargs):
+ self.auto_now, self.auto_now_add = auto_now, auto_now_add
+ if auto_now or auto_now_add:
+ kwargs['editable'] = False
+ kwargs['blank'] = True
+ Field.__init__(self, verbose_name, name, **kwargs)
+
+ def get_internal_type(self):
+ return "DateField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ if isinstance(value, datetime.datetime):
+ if settings.USE_TZ and timezone.is_aware(value):
+ # Convert aware datetimes to the default time zone
+ # before casting them to dates (#17742).
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_naive(value, default_timezone)
+ return value.date()
+ if isinstance(value, datetime.date):
+ return value
+
+ try:
+ parsed = parse_date(value)
+ if parsed is not None:
+ return parsed
+ except ValueError:
+ raise exceptions.ValidationError(
+ self.error_messages['invalid_date'],
+ code='invalid_date',
+ params={'value': value},
+ )
+
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
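+
+ # Example (illustrative):
+ #
+ #     >>> DateField().to_python('2013-05-01')
+ #     datetime.date(2013, 5, 1)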
+
+ def pre_save(self, model_instance, add):
+ if self.auto_now or (self.auto_now_add and add):
+ value = datetime.date.today()
+ setattr(model_instance, self.attname, value)
+ return value
+ else:
+ return super(DateField, self).pre_save(model_instance, add)
+
+ def contribute_to_class(self, cls, name):
+ super(DateField,self).contribute_to_class(cls, name)
+ if not self.null:
+ setattr(cls, 'get_next_by_%s' % self.name,
+ curry(cls._get_next_or_previous_by_FIELD, field=self,
+ is_next=True))
+ setattr(cls, 'get_previous_by_%s' % self.name,
+ curry(cls._get_next_or_previous_by_FIELD, field=self,
+ is_next=False))
+
+ def get_prep_lookup(self, lookup_type, value):
+ # For dates lookups, convert the value to an int
+ # so the database backend always sees a consistent type.
+ if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'):
+ return int(value)
+ return super(DateField, self).get_prep_lookup(lookup_type, value)
+
+ def get_prep_value(self, value):
+ return self.to_python(value)
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ # Casts dates into the format expected by the backend
+ if not prepared:
+ value = self.get_prep_value(value)
+ return connection.ops.value_to_db_date(value)
+
+ def value_to_string(self, obj):
+ val = self._get_val_from_obj(obj)
+ return '' if val is None else val.isoformat()
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.DateField}
+ defaults.update(kwargs)
+ return super(DateField, self).formfield(**defaults)
+
+class DateTimeField(DateField):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value has an invalid format. It must be in "
+ "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."),
+ 'invalid_date': _("'%(value)s' value has the correct format "
+ "(YYYY-MM-DD) but it is an invalid date."),
+ 'invalid_datetime': _("'%(value)s' value has the correct format "
+ "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
+ "but it is an invalid date/time."),
+ }
+ description = _("Date (with time)")
+
+ # __init__ is inherited from DateField
+
+ def get_internal_type(self):
+ return "DateTimeField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ if isinstance(value, datetime.datetime):
+ return value
+ if isinstance(value, datetime.date):
+ value = datetime.datetime(value.year, value.month, value.day)
+ if settings.USE_TZ:
+ # For backwards compatibility, interpret naive datetimes in
+ # local time. This won't work during DST change, but we can't
+ # do much about it, so we let the exceptions percolate up the
+ # call stack.
+ warnings.warn("DateTimeField %s.%s received a naive datetime "
+ "(%s) while time zone support is active." %
+ (self.model.__name__, self.name, value),
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ return value
+
+ try:
+ parsed = parse_datetime(value)
+ if parsed is not None:
+ return parsed
+ except ValueError:
+ raise exceptions.ValidationError(
+ self.error_messages['invalid_datetime'],
+ code='invalid_datetime',
+ params={'value': value},
+ )
+
+ try:
+ parsed = parse_date(value)
+ if parsed is not None:
+ return datetime.datetime(parsed.year, parsed.month, parsed.day)
+ except ValueError:
+ raise exceptions.ValidationError(
+ self.error_messages['invalid_date'],
+ code='invalid_date',
+ params={'value': value},
+ )
+
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def pre_save(self, model_instance, add):
+ if self.auto_now or (self.auto_now_add and add):
+ value = timezone.now()
+ setattr(model_instance, self.attname, value)
+ return value
+ else:
+ return super(DateTimeField, self).pre_save(model_instance, add)
+
+ # contribute_to_class is inherited from DateField, it registers
+ # get_next_by_FOO and get_prev_by_FOO
+
+ # get_prep_lookup is inherited from DateField
+
+ def get_prep_value(self, value):
+ value = self.to_python(value)
+ if value is not None and settings.USE_TZ and timezone.is_naive(value):
+ # For backwards compatibility, interpret naive datetimes in local
+ # time. This won't work during DST change, but we can't do much
+ # about it, so we let the exceptions percolate up the call stack.
+ warnings.warn("DateTimeField %s.%s received a naive datetime (%s)"
+ " while time zone support is active." %
+ (self.model.__name__, self.name, value),
+ RuntimeWarning)
+ default_timezone = timezone.get_default_timezone()
+ value = timezone.make_aware(value, default_timezone)
+ return value
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ # Casts datetimes into the format expected by the backend
+ if not prepared:
+ value = self.get_prep_value(value)
+ return connection.ops.value_to_db_datetime(value)
+
+ def value_to_string(self, obj):
+ val = self._get_val_from_obj(obj)
+ return '' if val is None else val.isoformat()
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.DateTimeField}
+ defaults.update(kwargs)
+ return super(DateTimeField, self).formfield(**defaults)
+
+class DecimalField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be a decimal number."),
+ }
+ description = _("Decimal number")
+
+ def __init__(self, verbose_name=None, name=None, max_digits=None,
+ decimal_places=None, **kwargs):
+ self.max_digits, self.decimal_places = max_digits, decimal_places
+ Field.__init__(self, verbose_name, name, **kwargs)
+
+ def get_internal_type(self):
+ return "DecimalField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ try:
+ return decimal.Decimal(value)
+ except decimal.InvalidOperation:
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
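+
+ # Example (illustrative):
+ #
+ #     >>> DecimalField(max_digits=5, decimal_places=2).to_python('12.5')
+ #     Decimal('12.5')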
+
+ def _format(self, value):
+ if isinstance(value, six.string_types) or value is None:
+ return value
+ else:
+ return self.format_number(value)
+
+ def format_number(self, value):
+ """
+ Formats a number into a string with the requisite number of digits and
+ decimal places.
+ """
+ # Method moved to django.db.backends.util.
+ #
+ # It is preserved because it is used by the oracle backend
+ # (django.db.backends.oracle.query), and also for
+ # backwards-compatibility with any external code which may have used
+ # this method.
+ from django.db.backends import util
+ return util.format_number(value, self.max_digits, self.decimal_places)
+
+ def get_db_prep_save(self, value, connection):
+ return connection.ops.value_to_db_decimal(self.to_python(value),
+ self.max_digits, self.decimal_places)
+
+ def get_prep_value(self, value):
+ return self.to_python(value)
+
+ def formfield(self, **kwargs):
+ defaults = {
+ 'max_digits': self.max_digits,
+ 'decimal_places': self.decimal_places,
+ 'form_class': forms.DecimalField,
+ }
+ defaults.update(kwargs)
+ return super(DecimalField, self).formfield(**defaults)
+
+class EmailField(CharField):
+ default_validators = [validators.validate_email]
+ description = _("Email address")
+
+ def __init__(self, *args, **kwargs):
+ # The default of 75 is kept for backwards compatibility; max_length
+ # should be overridden to 254 characters to be fully compliant with
+ # RFCs 3696 and 5321.
+ kwargs['max_length'] = kwargs.get('max_length', 75)
+ CharField.__init__(self, *args, **kwargs)
+
+ def formfield(self, **kwargs):
+ # As with CharField, this will cause email validation to be performed
+ # twice.
+ defaults = {
+ 'form_class': forms.EmailField,
+ }
+ defaults.update(kwargs)
+ return super(EmailField, self).formfield(**defaults)
+
+class FilePathField(Field):
+ description = _("File path")
+
+ def __init__(self, verbose_name=None, name=None, path='', match=None,
+ recursive=False, allow_files=True, allow_folders=False, **kwargs):
+ self.path, self.match, self.recursive = path, match, recursive
+ self.allow_files, self.allow_folders = allow_files, allow_folders
+ kwargs['max_length'] = kwargs.get('max_length', 100)
+ Field.__init__(self, verbose_name, name, **kwargs)
+
+ def get_prep_value(self, value):
+ value = super(FilePathField, self).get_prep_value(value)
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def formfield(self, **kwargs):
+ defaults = {
+ 'path': self.path,
+ 'match': self.match,
+ 'recursive': self.recursive,
+ 'form_class': forms.FilePathField,
+ 'allow_files': self.allow_files,
+ 'allow_folders': self.allow_folders,
+ }
+ defaults.update(kwargs)
+ return super(FilePathField, self).formfield(**defaults)
+
+ def get_internal_type(self):
+ return "FilePathField"
+
+class FloatField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be a float."),
+ }
+ description = _("Floating point number")
+
+ def get_prep_value(self, value):
+ if value is None:
+ return None
+ return float(value)
+
+ def get_internal_type(self):
+ return "FloatField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ try:
+ return float(value)
+ except (TypeError, ValueError):
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.FloatField}
+ defaults.update(kwargs)
+ return super(FloatField, self).formfield(**defaults)
+
+class IntegerField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be an integer."),
+ }
+ description = _("Integer")
+
+ def get_prep_value(self, value):
+ if value is None:
+ return None
+ return int(value)
+
+ def get_prep_lookup(self, lookup_type, value):
+ if ((lookup_type == 'gte' or lookup_type == 'lt')
+ and isinstance(value, float)):
+ value = math.ceil(value)
+ return super(IntegerField, self).get_prep_lookup(lookup_type, value)
+
+ def get_internal_type(self):
+ return "IntegerField"
+
+ def to_python(self, value):
+ if value is None:
+ return value
+ try:
+ return int(value)
+ except (TypeError, ValueError):
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.IntegerField}
+ defaults.update(kwargs)
+ return super(IntegerField, self).formfield(**defaults)
+
+class BigIntegerField(IntegerField):
+ empty_strings_allowed = False
+ description = _("Big (8 byte) integer")
+ MAX_BIGINT = 9223372036854775807
+
+ def get_internal_type(self):
+ return "BigIntegerField"
+
+ def formfield(self, **kwargs):
+ defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
+ 'max_value': BigIntegerField.MAX_BIGINT}
+ defaults.update(kwargs)
+ return super(BigIntegerField, self).formfield(**defaults)
+
+class IPAddressField(Field):
+ empty_strings_allowed = False
+ description = _("IPv4 address")
+
+ def __init__(self, *args, **kwargs):
+ kwargs['max_length'] = 15
+ Field.__init__(self, *args, **kwargs)
+
+ def get_prep_value(self, value):
+ value = super(IPAddressField, self).get_prep_value(value)
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def get_internal_type(self):
+ return "IPAddressField"
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.IPAddressField}
+ defaults.update(kwargs)
+ return super(IPAddressField, self).formfield(**defaults)
+
+class GenericIPAddressField(Field):
+ empty_strings_allowed = True
+ description = _("IP address")
+ default_error_messages = {}
+
+ def __init__(self, verbose_name=None, name=None, protocol='both',
+ unpack_ipv4=False, *args, **kwargs):
+ self.unpack_ipv4 = unpack_ipv4
+ self.protocol = protocol
+ self.default_validators, invalid_error_message = \
+ validators.ip_address_validators(protocol, unpack_ipv4)
+ self.default_error_messages['invalid'] = invalid_error_message
+ kwargs['max_length'] = 39
+ Field.__init__(self, verbose_name, name, *args, **kwargs)
+
+ def get_internal_type(self):
+ return "GenericIPAddressField"
+
+ def to_python(self, value):
+ if value and ':' in value:
+ return clean_ipv6_address(value,
+ self.unpack_ipv4, self.error_messages['invalid'])
+ return value
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ if not prepared:
+ value = self.get_prep_value(value)
+ return value or None
+
+ def get_prep_value(self, value):
+ if value is None:
+ return value
+ if value and ':' in value:
+ try:
+ return clean_ipv6_address(value, self.unpack_ipv4)
+ except exceptions.ValidationError:
+ pass
+ return six.text_type(value)
+
+ def formfield(self, **kwargs):
+ defaults = {
+ 'protocol': self.protocol,
+ 'form_class': forms.GenericIPAddressField,
+ }
+ defaults.update(kwargs)
+ return super(GenericIPAddressField, self).formfield(**defaults)
+
+
+class NullBooleanField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value must be either None, True or False."),
+ }
+ description = _("Boolean (Either True, False or None)")
+
+ def __init__(self, *args, **kwargs):
+ kwargs['null'] = True
+ kwargs['blank'] = True
+ Field.__init__(self, *args, **kwargs)
+
+ def get_internal_type(self):
+ return "NullBooleanField"
+
+ def to_python(self, value):
+ if value is None:
+ return None
+ if value in (True, False):
+ return bool(value)
+ if value in ('None',):
+ return None
+ if value in ('t', 'True', '1'):
+ return True
+ if value in ('f', 'False', '0'):
+ return False
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def get_prep_lookup(self, lookup_type, value):
+ # Special-case handling for filters coming from a Web request (e.g. the
+ # admin interface). Only works for scalar values (not lists). If you're
+ # passing in a list, you might as well make things the right type when
+ # constructing the list.
+ if value in ('1', '0'):
+ value = bool(int(value))
+ return super(NullBooleanField, self).get_prep_lookup(lookup_type,
+ value)
+
+ def get_prep_value(self, value):
+ if value is None:
+ return None
+ return bool(value)
+
+ def formfield(self, **kwargs):
+ defaults = {
+ 'form_class': forms.NullBooleanField,
+ 'required': not self.blank,
+ 'label': capfirst(self.verbose_name),
+ 'help_text': self.help_text}
+ defaults.update(kwargs)
+ return super(NullBooleanField, self).formfield(**defaults)
+
+class PositiveIntegerField(IntegerField):
+ description = _("Positive integer")
+
+ def get_internal_type(self):
+ return "PositiveIntegerField"
+
+ def formfield(self, **kwargs):
+ defaults = {'min_value': 0}
+ defaults.update(kwargs)
+ return super(PositiveIntegerField, self).formfield(**defaults)
+
+class PositiveSmallIntegerField(IntegerField):
+ description = _("Positive small integer")
+
+ def get_internal_type(self):
+ return "PositiveSmallIntegerField"
+
+ def formfield(self, **kwargs):
+ defaults = {'min_value': 0}
+ defaults.update(kwargs)
+ return super(PositiveSmallIntegerField, self).formfield(**defaults)
+
+class SlugField(CharField):
+ default_validators = [validators.validate_slug]
+ description = _("Slug (up to %(max_length)s)")
+
+ def __init__(self, *args, **kwargs):
+ kwargs['max_length'] = kwargs.get('max_length', 50)
+ # Set db_index=True unless it's been set manually.
+ if 'db_index' not in kwargs:
+ kwargs['db_index'] = True
+ super(SlugField, self).__init__(*args, **kwargs)
+
+ def get_internal_type(self):
+ return "SlugField"
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.SlugField}
+ defaults.update(kwargs)
+ return super(SlugField, self).formfield(**defaults)
+
+class SmallIntegerField(IntegerField):
+ description = _("Small integer")
+
+ def get_internal_type(self):
+ return "SmallIntegerField"
+
+class TextField(Field):
+ description = _("Text")
+
+ def get_internal_type(self):
+ return "TextField"
+
+ def get_prep_value(self, value):
+ if isinstance(value, six.string_types) or value is None:
+ return value
+ return smart_text(value)
+
+ def formfield(self, **kwargs):
+ defaults = {'widget': forms.Textarea}
+ defaults.update(kwargs)
+ return super(TextField, self).formfield(**defaults)
+
+class TimeField(Field):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _("'%(value)s' value has an invalid format. It must be in "
+ "HH:MM[:ss[.uuuuuu]] format."),
+ 'invalid_time': _("'%(value)s' value has the correct format "
+ "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."),
+ }
+ description = _("Time")
+
+ def __init__(self, verbose_name=None, name=None, auto_now=False,
+ auto_now_add=False, **kwargs):
+ self.auto_now, self.auto_now_add = auto_now, auto_now_add
+ if auto_now or auto_now_add:
+ kwargs['editable'] = False
+ kwargs['blank'] = True
+ Field.__init__(self, verbose_name, name, **kwargs)
+
+ def get_internal_type(self):
+ return "TimeField"
+
+ def to_python(self, value):
+ if value is None:
+ return None
+ if isinstance(value, datetime.time):
+ return value
+ if isinstance(value, datetime.datetime):
+ # Not usually a good idea to pass in a datetime here (it loses
+ # information), but this can be a side-effect of interacting with a
+ # database backend (e.g. Oracle), so we'll be accommodating.
+ return value.time()
+
+ try:
+ parsed = parse_time(value)
+ if parsed is not None:
+ return parsed
+ except ValueError:
+ raise exceptions.ValidationError(
+ self.error_messages['invalid_time'],
+ code='invalid_time',
+ params={'value': value},
+ )
+
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'value': value},
+ )
+
+ def pre_save(self, model_instance, add):
+ if self.auto_now or (self.auto_now_add and add):
+ value = datetime.datetime.now().time()
+ setattr(model_instance, self.attname, value)
+ return value
+ else:
+ return super(TimeField, self).pre_save(model_instance, add)
+
+ def get_prep_value(self, value):
+ return self.to_python(value)
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ # Casts times into the format expected by the backend
+ if not prepared:
+ value = self.get_prep_value(value)
+ return connection.ops.value_to_db_time(value)
+
+ def value_to_string(self, obj):
+ val = self._get_val_from_obj(obj)
+ return '' if val is None else val.isoformat()
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.TimeField}
+ defaults.update(kwargs)
+ return super(TimeField, self).formfield(**defaults)
+
+class URLField(CharField):
+ default_validators = [validators.URLValidator()]
+ description = _("URL")
+
+ def __init__(self, verbose_name=None, name=None, **kwargs):
+ kwargs['max_length'] = kwargs.get('max_length', 200)
+ CharField.__init__(self, verbose_name, name, **kwargs)
+
+ def formfield(self, **kwargs):
+ # As with CharField, this will cause URL validation to be performed
+ # twice.
+ defaults = {
+ 'form_class': forms.URLField,
+ }
+ defaults.update(kwargs)
+ return super(URLField, self).formfield(**defaults)
+
+class BinaryField(Field):
+ description = _("Raw binary data")
+ empty_values = [None, b'']
+
+ def __init__(self, *args, **kwargs):
+ kwargs['editable'] = False
+ super(BinaryField, self).__init__(*args, **kwargs)
+ if self.max_length is not None:
+ self.validators.append(validators.MaxLengthValidator(self.max_length))
+
+ def get_internal_type(self):
+ return "BinaryField"
+
+ def get_default(self):
+ if self.has_default() and not callable(self.default):
+ return self.default
+ default = super(BinaryField, self).get_default()
+ if default == '':
+ return b''
+ return default
+
+ def get_db_prep_value(self, value, connection, prepared=False):
+ value = super(BinaryField, self
+ ).get_db_prep_value(value, connection, prepared)
+ if value is not None:
+ return connection.Database.Binary(value)
+ return value
+
+ def value_to_string(self, obj):
+ """Binary data is serialized as base64"""
+ return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
+
+ def to_python(self, value):
+ # If it's a string, it should be base64-encoded data
+ if isinstance(value, six.text_type):
+ return six.memoryview(b64decode(force_bytes(value)))
+ return value
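+
+# Illustrative example (not part of Django's source): a text value is
+# treated as base64-encoded data and decoded into a memoryview:
+#
+#     >>> BinaryField().to_python(u'aGVsbG8=')   # base64 of b'hello'
+#     <memory at 0x...>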
diff --git a/lib/python2.7/site-packages/django/db/models/fields/files.py b/lib/python2.7/site-packages/django/db/models/fields/files.py
new file mode 100644
index 0000000..3b3c1ec
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/files.py
@@ -0,0 +1,397 @@
+import datetime
+import os
+
+from django import forms
+from django.db.models.fields import Field
+from django.core.files.base import File
+from django.core.files.storage import default_storage
+from django.core.files.images import ImageFile
+from django.db.models import signals
+from django.utils.encoding import force_str, force_text
+from django.utils import six
+from django.utils.translation import ugettext_lazy as _
+
+class FieldFile(File):
+ def __init__(self, instance, field, name):
+ super(FieldFile, self).__init__(None, name)
+ self.instance = instance
+ self.field = field
+ self.storage = field.storage
+ self._committed = True
+
+ def __eq__(self, other):
+ # Older code may be expecting FileField values to be simple strings.
+ # By overriding the == operator, we can maintain backwards compatibility.
+ if hasattr(other, 'name'):
+ return self.name == other.name
+ return self.name == other
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash(self.name)
+
+ # The standard File contains most of the necessary properties, but
+ # FieldFiles can be instantiated without a name, so that needs to
+ # be checked for here.
+
+ def _require_file(self):
+ if not self:
+ raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
+
+ def _get_file(self):
+ self._require_file()
+ if not hasattr(self, '_file') or self._file is None:
+ self._file = self.storage.open(self.name, 'rb')
+ return self._file
+
+ def _set_file(self, file):
+ self._file = file
+
+ def _del_file(self):
+ del self._file
+
+ file = property(_get_file, _set_file, _del_file)
+
+ def _get_path(self):
+ self._require_file()
+ return self.storage.path(self.name)
+ path = property(_get_path)
+
+ def _get_url(self):
+ self._require_file()
+ return self.storage.url(self.name)
+ url = property(_get_url)
+
+ def _get_size(self):
+ self._require_file()
+ if not self._committed:
+ return self.file.size
+ return self.storage.size(self.name)
+ size = property(_get_size)
+
+ def open(self, mode='rb'):
+ self._require_file()
+ self.file.open(mode)
+ # open() doesn't alter the file's contents, but it does reset the pointer
+ open.alters_data = True
+
+ # In addition to the standard File API, FieldFiles have extra methods
+ # to further manipulate the underlying file, as well as update the
+ # associated model instance.
+
+ def save(self, name, content, save=True):
+ name = self.field.generate_filename(self.instance, name)
+ self.name = self.storage.save(name, content)
+ setattr(self.instance, self.field.name, self.name)
+
+ # Update the filesize cache
+ self._size = content.size
+ self._committed = True
+
+ # Save the object because it has changed, unless save is False
+ if save:
+ self.instance.save()
+ save.alters_data = True
+
+ def delete(self, save=True):
+ if not self:
+ return
+ # Only close the file if it's already open, which we know by the
+ # presence of self._file
+ if hasattr(self, '_file'):
+ self.close()
+ del self.file
+
+ self.storage.delete(self.name)
+
+ self.name = None
+ setattr(self.instance, self.field.name, self.name)
+
+ # Delete the filesize cache
+ if hasattr(self, '_size'):
+ del self._size
+ self._committed = False
+
+ if save:
+ self.instance.save()
+ delete.alters_data = True
+
+ def _get_closed(self):
+ file = getattr(self, '_file', None)
+ return file is None or file.closed
+ closed = property(_get_closed)
+
+ def close(self):
+ file = getattr(self, '_file', None)
+ if file is not None:
+ file.close()
+
+ def __getstate__(self):
+ # FieldFile needs access to its associated model field and an instance
+ # it's attached to in order to work properly, but the only necessary
+ # data to be pickled is the file's name itself. Everything else will
+ # be restored later, by FileDescriptor below.
+ return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
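+
+# Illustrative usage (not part of Django's source; ``doc`` is a hypothetical
+# model instance whose ``upload`` attribute is a FileField):
+#
+#     >>> from django.core.files.base import ContentFile
+#     >>> doc.upload.save('report.txt', ContentFile(b'hello'))
+#     >>> doc.upload.name, doc.upload.size   # name may be adjusted by storage
+#     ('report.txt', 5)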
+
+class FileDescriptor(object):
+ """
+ The descriptor for the file attribute on the model instance. Returns a
+ FieldFile when accessed so you can do stuff like::
+
+ >>> instance.file.size
+
+ Assigns a file object on assignment so you can do::
+
+ >>> instance.file = File(...)
+
+ """
+ def __init__(self, field):
+ self.field = field
+
+ def __get__(self, instance=None, owner=None):
+ if instance is None:
+ raise AttributeError(
+ "The '%s' attribute can only be accessed from %s instances."
+ % (self.field.name, owner.__name__))
+
+ # This is slightly complicated, so worth an explanation.
+ # instance.file needs to ultimately return some instance of File,
+ # probably a subclass. Additionally, this returned object needs to have
+ # the FieldFile API so that users can easily do things like
+ # instance.file.path and have that delegated to the file storage engine.
+ # Easy enough if we're strict about assignment in __set__, but if you
+ # peek below you can see that we're not. So depending on the current
+ # value of the field we have to dynamically construct some sort of
+ # "thing" to return.
+
+ # The instance dict contains whatever was originally assigned
+ # in __set__.
+ file = instance.__dict__[self.field.name]
+
+ # If this value is a string (instance.file = "path/to/file") or None
+ # then we simply wrap it with the appropriate attribute class according
+ # to the file field. [This is FieldFile for FileFields and
+ # ImageFieldFile for ImageFields; it's also conceivable that user
+ # subclasses might also want to subclass the attribute class]. This
+ # object understands how to convert a path to a file, and also how to
+ # handle None.
+ if isinstance(file, six.string_types) or file is None:
+ attr = self.field.attr_class(instance, self.field, file)
+ instance.__dict__[self.field.name] = attr
+
+ # Other types of files may be assigned as well, but they need to have
+ # the FieldFile interface added to them. Thus, we wrap any other type of
+ # File inside a FieldFile (well, the field's attr_class, which is
+ # usually FieldFile).
+ elif isinstance(file, File) and not isinstance(file, FieldFile):
+ file_copy = self.field.attr_class(instance, self.field, file.name)
+ file_copy.file = file
+ file_copy._committed = False
+ instance.__dict__[self.field.name] = file_copy
+
+ # Finally, because of the (some would say boneheaded) way pickle works,
+ # the underlying FieldFile might not actually itself have an associated
+ # file. So we need to reset the details of the FieldFile in those cases.
+ elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
+ file.instance = instance
+ file.field = self.field
+ file.storage = self.field.storage
+
+ # That was fun, wasn't it?
+ return instance.__dict__[self.field.name]
+
+ def __set__(self, instance, value):
+ instance.__dict__[self.field.name] = value
+
+class FileField(Field):
+
+ # The class to wrap instance attributes in. Accessing the file object off
+ # the instance will always return an instance of attr_class.
+ attr_class = FieldFile
+
+ # The descriptor to use for accessing the attribute off of the class.
+ descriptor_class = FileDescriptor
+
+ description = _("File")
+
+ def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
+ for arg in ('primary_key', 'unique'):
+ if arg in kwargs:
+ raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
+
+ self.storage = storage or default_storage
+ self.upload_to = upload_to
+ if callable(upload_to):
+ self.generate_filename = upload_to
+
+ kwargs['max_length'] = kwargs.get('max_length', 100)
+ super(FileField, self).__init__(verbose_name, name, **kwargs)
+
+ def get_internal_type(self):
+ return "FileField"
+
+ def get_prep_lookup(self, lookup_type, value):
+ if hasattr(value, 'name'):
+ value = value.name
+ return super(FileField, self).get_prep_lookup(lookup_type, value)
+
+ def get_prep_value(self, value):
+ "Returns field's value prepared for saving into a database."
+ # Need to convert File objects provided via a form to unicode for database insertion
+ if value is None:
+ return None
+ return six.text_type(value)
+
+ def pre_save(self, model_instance, add):
+ "Returns field's value just before saving."
+ file = super(FileField, self).pre_save(model_instance, add)
+ if file and not file._committed:
+ # Commit the file to storage prior to saving the model
+ file.save(file.name, file, save=False)
+ return file
+
+ def contribute_to_class(self, cls, name):
+ super(FileField, self).contribute_to_class(cls, name)
+ setattr(cls, self.name, self.descriptor_class(self))
+
+ def get_directory_name(self):
+ return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
+
+ def get_filename(self, filename):
+ return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
+
+ def generate_filename(self, instance, filename):
+ return os.path.join(self.get_directory_name(), self.get_filename(filename))
+
+ def save_form_data(self, instance, data):
+ # Important: None means "no change"; any other false value means
+ # "clear". This subtle distinction (rather than a more explicit
+ # marker) is needed because we need to consume values that are also
+ # sane for a regular (non-model) form to find in its cleaned_data
+ # dictionary.
+ if data is not None:
+ # This value will be converted to unicode and stored in the
+ # database, so leaving False as-is is not acceptable.
+ if not data:
+ data = ''
+ setattr(instance, self.name, data)
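+
+ # Example (illustrative): given a bound ``field`` and model ``instance``,
+ #
+ #     >>> field.save_form_data(instance, None)    # keeps the current file
+ #     >>> field.save_form_data(instance, False)   # clears it: stores ''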
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
+ # If a file has been provided previously, then the form doesn't require
+ # that a new file is provided this time.
+ # The code to mark the form field as not required is used by
+ # form_for_instance, but can probably be removed once form_for_instance
+ # is gone. ModelForm uses a different method to check for an existing file.
+ if 'initial' in kwargs:
+ defaults['required'] = False
+ defaults.update(kwargs)
+ return super(FileField, self).formfield(**defaults)
+
+class ImageFileDescriptor(FileDescriptor):
+ """
+ Just like the FileDescriptor, but for ImageFields. The only difference is
+ assigning the width/height to the width_field/height_field, if appropriate.
+ """
+ def __set__(self, instance, value):
+ previous_file = instance.__dict__.get(self.field.name)
+ super(ImageFileDescriptor, self).__set__(instance, value)
+
+ # To prevent recalculating image dimensions when we are instantiating
+ # an object from the database (bug #11084), only update dimensions if
+ # the field had a value before this assignment. Since the default
+ # value for FileField subclasses is an instance of field.attr_class,
+ # previous_file will only be None when we are called from
+ # Model.__init__(). The ImageField.update_dimension_fields method
+ # hooked up to the post_init signal handles the Model.__init__() cases.
+ # Assignment happening outside of Model.__init__() will trigger the
+ # update right here.
+ if previous_file is not None:
+ self.field.update_dimension_fields(instance, force=True)
+
+class ImageFieldFile(ImageFile, FieldFile):
+
+ def delete(self, save=True):
+ # Clear the image dimensions cache
+ if hasattr(self, '_dimensions_cache'):
+ del self._dimensions_cache
+ super(ImageFieldFile, self).delete(save)
+
+class ImageField(FileField):
+ attr_class = ImageFieldFile
+ descriptor_class = ImageFileDescriptor
+ description = _("Image")
+
+ def __init__(self, verbose_name=None, name=None, width_field=None,
+ height_field=None, **kwargs):
+ self.width_field, self.height_field = width_field, height_field
+ super(ImageField, self).__init__(verbose_name, name, **kwargs)
+
+ def contribute_to_class(self, cls, name):
+ super(ImageField, self).contribute_to_class(cls, name)
+ # Attach update_dimension_fields so that dimension fields declared
+ # after their corresponding image field don't stay cleared by
+ # Model.__init__, see bug #11196.
+ signals.post_init.connect(self.update_dimension_fields, sender=cls)
+
+ def update_dimension_fields(self, instance, force=False, *args, **kwargs):
+ """
+ Updates field's width and height fields, if defined.
+
+ This method is hooked up to model's post_init signal to update
+ dimensions after instantiating a model instance. However, dimensions
+ won't be updated if the dimensions fields are already populated. This
+ avoids unnecessary recalculation when loading an object from the
+ database.
+
+ Dimensions can be forced to update with force=True, which is how
+ ImageFileDescriptor.__set__ calls this method.
+ """
+ # Nothing to update if the field doesn't have dimension fields.
+ has_dimension_fields = self.width_field or self.height_field
+ if not has_dimension_fields:
+ return
+
+ # getattr will call the ImageFileDescriptor's __get__ method, which
+ # coerces the assigned value into an instance of self.attr_class
+ # (ImageFieldFile in this case).
+ file = getattr(instance, self.attname)
+
+ # Nothing to update if we have no file and not being forced to update.
+ if not file and not force:
+ return
+
+ dimension_fields_filled = not (
+ (self.width_field and not getattr(instance, self.width_field))
+ or (self.height_field and not getattr(instance, self.height_field))
+ )
+ # When both dimension fields have values, we are most likely loading
+ # data from the database or updating an image field that already had
+ # an image stored. In the first case, we don't want to update the
+ # dimension fields because we are already getting their values from the
+ # database. In the second case, we do want to update the dimension
+ # fields and will skip this return because force will be True since we
+ # were called from ImageFileDescriptor.__set__.
+ if dimension_fields_filled and not force:
+ return
+
+ # file should be an instance of ImageFieldFile or should be None.
+ if file:
+ width = file.width
+ height = file.height
+ else:
+ # No file, so clear dimensions fields.
+ width = None
+ height = None
+
+ # Update the width and height fields.
+ if self.width_field:
+ setattr(instance, self.width_field, width)
+ if self.height_field:
+ setattr(instance, self.height_field, height)
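+
+ # Illustrative usage (hypothetical model, not part of Django's source):
+ #
+ #     class Photo(models.Model):
+ #         image = models.ImageField(upload_to='photos',
+ #                                   width_field='w', height_field='h')
+ #         w = models.PositiveIntegerField(editable=False)
+ #         h = models.PositiveIntegerField(editable=False)
+ #
+ # Assigning ``photo.image = File(...)`` goes through
+ # ImageFileDescriptor.__set__, which calls this method with force=True
+ # to refresh ``w`` and ``h``.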
+
+ def formfield(self, **kwargs):
+ defaults = {'form_class': forms.ImageField}
+ defaults.update(kwargs)
+ return super(ImageField, self).formfield(**defaults)
diff --git a/lib/python2.7/site-packages/django/db/models/fields/proxy.py b/lib/python2.7/site-packages/django/db/models/fields/proxy.py
new file mode 100644
index 0000000..c0cc873
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/proxy.py
@@ -0,0 +1,17 @@
+"""
+Field-like classes that aren't really fields. It's easier to use objects that
+have the same attributes as fields sometimes (avoids a lot of special casing).
+"""
+
+from django.db.models import fields
+
+class OrderWrt(fields.IntegerField):
+ """
+ A proxy for the _order database field that is used when
+ Meta.order_with_respect_to is specified.
+ """
+
+ def __init__(self, *args, **kwargs):
+ kwargs['name'] = '_order'
+ kwargs['editable'] = False
+ super(OrderWrt, self).__init__(*args, **kwargs)
diff --git a/lib/python2.7/site-packages/django/db/models/fields/related.py b/lib/python2.7/site-packages/django/db/models/fields/related.py
new file mode 100644
index 0000000..a4bc374
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/related.py
@@ -0,0 +1,1545 @@
+from operator import attrgetter
+
+from django.db import connection, connections, router
+from django.db.backends import util
+from django.db.models import signals, get_model
+from django.db.models.fields import (AutoField, Field, IntegerField,
+ PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
+from django.db.models.related import RelatedObject, PathInfo
+from django.db.models.query import QuerySet
+from django.db.models.deletion import CASCADE
+from django.utils.encoding import smart_text
+from django.utils import six
+from django.utils.deprecation import RenameMethodsBase
+from django.utils.translation import ugettext_lazy as _
+from django.utils.functional import curry, cached_property
+from django.core import exceptions
+from django import forms
+
+RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
+
+pending_lookups = {}
+
+
+def add_lazy_relation(cls, field, relation, operation):
+ """
+ Adds a lookup on ``cls`` when a related field is defined using a string,
+ i.e.::
+
+ class MyModel(Model):
+ fk = ForeignKey("AnotherModel")
+
+ This string can be:
+
+ * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
+ relation.
+
+ * The name of a model (i.e "AnotherModel") to indicate another model in
+ the same app.
+
+ * An app-label and model name (i.e. "someapp.AnotherModel") to indicate
+ another model in a different app.
+
+ If the other model hasn't yet been loaded -- almost a given if you're using
+ lazy relationships -- then the relation won't be set up until the
+ class_prepared signal fires at the end of model initialization.
+
+ operation is the work that must be performed once the relation can be resolved.
+ """
+ # Check for recursive relations
+ if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
+ app_label = cls._meta.app_label
+ model_name = cls.__name__
+
+ else:
+ # Look for an "app.Model" relation
+
+ if isinstance(relation, six.string_types):
+ try:
+ app_label, model_name = relation.split(".")
+ except ValueError:
+ # If we can't split, assume a model in current app
+ app_label = cls._meta.app_label
+ model_name = relation
+ else:
+ # it's actually a model class
+ app_label = relation._meta.app_label
+ model_name = relation._meta.object_name
+
+ # Try to look up the related model, and if it's already loaded resolve the
+ # string right away. If get_model returns None, it means that the related
+ # model isn't loaded yet, so we need to defer the relation until the class
+ # is prepared.
+ model = get_model(app_label, model_name,
+ seed_cache=False, only_installed=False)
+ if model:
+ operation(field, model, cls)
+ else:
+ key = (app_label, model_name)
+ value = (cls, field, operation)
+ pending_lookups.setdefault(key, []).append(value)
+
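+# Illustrative example (not part of Django's source): a string reference
+# keeps the relation lazy until the target model class is prepared:
+#
+#     class Book(models.Model):              # hypothetical models
+#         author = models.ForeignKey('someapp.Author')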
+
+def do_pending_lookups(sender, **kwargs):
+ """
+ Handle any pending relations to the sending model. Sent from class_prepared.
+ """
+ key = (sender._meta.app_label, sender.__name__)
+ for cls, field, operation in pending_lookups.pop(key, []):
+ operation(field, sender, cls)
+
+signals.class_prepared.connect(do_pending_lookups)
+
+
+# HACK
+class RelatedField(Field):
+ def db_type(self, connection):
+ '''By default a related field has no column of its own,
+ as it relates columns to another table.'''
+ return None
+
+ def contribute_to_class(self, cls, name, virtual_only=False):
+ sup = super(RelatedField, self)
+
+ # Store the opts for related_query_name()
+ self.opts = cls._meta
+
+ if hasattr(sup, 'contribute_to_class'):
+ sup.contribute_to_class(cls, name, virtual_only=virtual_only)
+
+ if not cls._meta.abstract and self.rel.related_name:
+ related_name = self.rel.related_name % {
+ 'class': cls.__name__.lower(),
+ 'app_label': cls._meta.app_label.lower()
+ }
+ self.rel.related_name = related_name
+ other = self.rel.to
+ if isinstance(other, six.string_types) or other._meta.pk is None:
+ def resolve_related_class(field, model, cls):
+ field.rel.to = model
+ field.do_related_class(model, cls)
+ add_lazy_relation(cls, self, other, resolve_related_class)
+ else:
+ self.do_related_class(other, cls)
+
+ def set_attributes_from_rel(self):
+ self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
+ if self.verbose_name is None:
+ self.verbose_name = self.rel.to._meta.verbose_name
+ self.rel.set_field_name()
+
+ def do_related_class(self, other, cls):
+ self.set_attributes_from_rel()
+ self.related = RelatedObject(other, cls, self)
+ if not cls._meta.abstract:
+ self.contribute_to_related_class(other, self.related)
+
+ def related_query_name(self):
+ # This method defines the name that can be used to identify this
+ # related object in a table-spanning query. It uses the lower-cased
+ # object_name by default, but this can be overridden with the
+ # "related_name" option.
+ return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
+
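+# For example (hypothetical models): with related_name="books" on a
+# ForeignKey to Author, related_query_name() yields "books", so
+# Author.objects.filter(books__title="...") spans the relation; without a
+# related_name it falls back to the lower-cased model name.
+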
+
+class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
+ renamed_methods = (
+ ('get_query_set', 'get_queryset', PendingDeprecationWarning),
+ ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
+ )
+
+
+class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
+ # This class provides the functionality that makes the related-object
+ # managers available as attributes on a model class, for fields that have
+ # a single "remote" value, on the class pointed to by a related field.
+ # In the example "place.restaurant", the restaurant attribute is a
+ # SingleRelatedObjectDescriptor instance.
+ def __init__(self, related):
+ self.related = related
+ self.cache_name = related.get_cache_name()
+
+ def is_cached(self, instance):
+ return hasattr(instance, self.cache_name)
+
+ def get_queryset(self, **db_hints):
+ db = router.db_for_read(self.related.model, **db_hints)
+ return self.related.model._base_manager.using(db)
+
+ def get_prefetch_queryset(self, instances):
+ rel_obj_attr = attrgetter(self.related.field.attname)
+ instance_attr = lambda obj: obj._get_pk_val()
+ instances_dict = dict((instance_attr(inst), inst) for inst in instances)
+ query = {'%s__in' % self.related.field.name: instances}
+ qs = self.get_queryset(instance=instances[0]).filter(**query)
+ # Since we're going to assign directly in the cache,
+ # we must manage the reverse relation cache manually.
+ rel_obj_cache_name = self.related.field.get_cache_name()
+ for rel_obj in qs:
+ instance = instances_dict[rel_obj_attr(rel_obj)]
+ setattr(rel_obj, rel_obj_cache_name, instance)
+ return qs, rel_obj_attr, instance_attr, True, self.cache_name
+
+ def __get__(self, instance, instance_type=None):
+ if instance is None:
+ return self
+ try:
+ rel_obj = getattr(instance, self.cache_name)
+ except AttributeError:
+ related_pk = instance._get_pk_val()
+ if related_pk is None:
+ rel_obj = None
+ else:
+ params = {}
+ for lh_field, rh_field in self.related.field.related_fields:
+ params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
+ try:
+ rel_obj = self.get_queryset(instance=instance).get(**params)
+ except self.related.model.DoesNotExist:
+ rel_obj = None
+ else:
+ setattr(rel_obj, self.related.field.get_cache_name(), instance)
+ setattr(instance, self.cache_name, rel_obj)
+ if rel_obj is None:
+ raise self.related.model.DoesNotExist("%s has no %s." % (
+ instance.__class__.__name__,
+ self.related.get_accessor_name()))
+ else:
+ return rel_obj
+
+ def __set__(self, instance, value):
+ # The similarity of the code below to the code in
+ # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
+ # of small differences that would make a common base class convoluted.
+
+ # If null=True, we can assign null here, but otherwise the value needs
+ # to be an instance of the related class.
+        if value is None and not self.related.field.null:
+ raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
+ (instance._meta.object_name, self.related.get_accessor_name()))
+ elif value is not None and not isinstance(value, self.related.model):
+ raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
+ (value, instance._meta.object_name,
+ self.related.get_accessor_name(), self.related.opts.object_name))
+ elif value is not None:
+ if instance._state.db is None:
+ instance._state.db = router.db_for_write(instance.__class__, instance=value)
+ elif value._state.db is None:
+ value._state.db = router.db_for_write(value.__class__, instance=instance)
+ elif value._state.db is not None and instance._state.db is not None:
+ if not router.allow_relation(value, instance):
+ raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
+
+ related_pk = tuple([getattr(instance, field.attname) for field in self.related.field.foreign_related_fields])
+ if None in related_pk:
+ raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
+ (value, instance._meta.object_name))
+
+ # Set the value of the related field to the value of the related object's related field
+ for index, field in enumerate(self.related.field.local_related_fields):
+ setattr(value, field.attname, related_pk[index])
+
+ # Since we already know what the related object is, seed the related
+ # object caches now, too. This avoids another db hit if you get the
+ # object you just set.
+ setattr(instance, self.cache_name, value)
+ setattr(value, self.related.field.get_cache_name(), instance)
+
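+# A usage sketch for the descriptor above (hypothetical models):
+#
+#     class Place(models.Model):
+#         pass
+#
+#     class Restaurant(models.Model):
+#         place = models.OneToOneField(Place)
+#
+#     place.restaurant        # __get__: one query, then cached on place
+#     place.restaurant = r    # __set__: fills r.place_id, seeds both caches
+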
+
+class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
+ # This class provides the functionality that makes the related-object
+ # managers available as attributes on a model class, for fields that have
+ # a single "remote" value, on the class that defines the related field.
+ # In the example "choice.poll", the poll attribute is a
+ # ReverseSingleRelatedObjectDescriptor instance.
+ def __init__(self, field_with_rel):
+ self.field = field_with_rel
+ self.cache_name = self.field.get_cache_name()
+
+ def is_cached(self, instance):
+ return hasattr(instance, self.cache_name)
+
+ def get_queryset(self, **db_hints):
+ db = router.db_for_read(self.field.rel.to, **db_hints)
+ rel_mgr = self.field.rel.to._default_manager
+ # If the related manager indicates that it should be used for
+ # related fields, respect that.
+ if getattr(rel_mgr, 'use_for_related_fields', False):
+ return rel_mgr.using(db)
+ else:
+ return QuerySet(self.field.rel.to).using(db)
+
+ def get_prefetch_queryset(self, instances):
+ rel_obj_attr = self.field.get_foreign_related_value
+ instance_attr = self.field.get_local_related_value
+ instances_dict = dict((instance_attr(inst), inst) for inst in instances)
+ related_field = self.field.foreign_related_fields[0]
+
+ # FIXME: This will need to be revisited when we introduce support for
+ # composite fields. In the meantime we take this practical approach to
+        # solve a regression in 1.6 when the reverse manager is hidden
+ # (related_name ends with a '+'). Refs #21410.
+ # The check for len(...) == 1 is a special case that allows the query
+ # to be join-less and smaller. Refs #21760.
+ if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
+ query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
+ else:
+ query = {'%s__in' % self.field.related_query_name(): instances}
+
+ qs = self.get_queryset(instance=instances[0]).filter(**query)
+ # Since we're going to assign directly in the cache,
+ # we must manage the reverse relation cache manually.
+ if not self.field.rel.multiple:
+ rel_obj_cache_name = self.field.related.get_cache_name()
+ for rel_obj in qs:
+ instance = instances_dict[rel_obj_attr(rel_obj)]
+ setattr(rel_obj, rel_obj_cache_name, instance)
+ return qs, rel_obj_attr, instance_attr, True, self.cache_name
+
+ def __get__(self, instance, instance_type=None):
+ if instance is None:
+ return self
+ try:
+ rel_obj = getattr(instance, self.cache_name)
+ except AttributeError:
+ val = self.field.get_local_related_value(instance)
+ if None in val:
+ rel_obj = None
+ else:
+ params = dict(
+ (rh_field.attname, getattr(instance, lh_field.attname))
+ for lh_field, rh_field in self.field.related_fields)
+ qs = self.get_queryset(instance=instance)
+ extra_filter = self.field.get_extra_descriptor_filter(instance)
+ if isinstance(extra_filter, dict):
+ params.update(extra_filter)
+ qs = qs.filter(**params)
+ else:
+ qs = qs.filter(extra_filter, **params)
+ # Assuming the database enforces foreign keys, this won't fail.
+ rel_obj = qs.get()
+ if not self.field.rel.multiple:
+ setattr(rel_obj, self.field.related.get_cache_name(), instance)
+ setattr(instance, self.cache_name, rel_obj)
+ if rel_obj is None and not self.field.null:
+ raise self.field.rel.to.DoesNotExist(
+ "%s has no %s." % (self.field.model.__name__, self.field.name))
+ else:
+ return rel_obj
+
+ def __set__(self, instance, value):
+ # If null=True, we can assign null here, but otherwise the value needs
+ # to be an instance of the related class.
+        if value is None and not self.field.null:
+ raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
+ (instance._meta.object_name, self.field.name))
+ elif value is not None and not isinstance(value, self.field.rel.to):
+ raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
+ (value, instance._meta.object_name,
+ self.field.name, self.field.rel.to._meta.object_name))
+ elif value is not None:
+ if instance._state.db is None:
+ instance._state.db = router.db_for_write(instance.__class__, instance=value)
+ elif value._state.db is None:
+ value._state.db = router.db_for_write(value.__class__, instance=instance)
+ elif value._state.db is not None and instance._state.db is not None:
+ if not router.allow_relation(value, instance):
+ raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
+
+ # If we're setting the value of a OneToOneField to None, we need to clear
+ # out the cache on any old related object. Otherwise, deleting the
+ # previously-related object will also cause this object to be deleted,
+ # which is wrong.
+ if value is None:
+ # Look up the previously-related object, which may still be available
+ # since we've not yet cleared out the related field.
+ # Use the cache directly, instead of the accessor; if we haven't
+ # populated the cache, then we don't care - we're only accessing
+ # the object to invalidate the accessor cache, so there's no
+ # need to populate the cache just to expire it again.
+ related = getattr(instance, self.cache_name, None)
+
+ # If we've got an old related object, we need to clear out its
+ # cache. This cache also might not exist if the related object
+ # hasn't been accessed yet.
+ if related is not None:
+ setattr(related, self.field.related.get_cache_name(), None)
+
+ # Set the value of the related field
+ for lh_field, rh_field in self.field.related_fields:
+ try:
+ setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
+ except AttributeError:
+ setattr(instance, lh_field.attname, None)
+
+ # Since we already know what the related object is, seed the related
+ # object caches now, too. This avoids another db hit if you get the
+ # object you just set.
+ setattr(instance, self.cache_name, value)
+ if value is not None and not self.field.rel.multiple:
+ setattr(value, self.field.related.get_cache_name(), instance)
+
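+# A usage sketch for the forward side (hypothetical models):
+#
+#     class Poll(models.Model):
+#         pass
+#
+#     class Choice(models.Model):
+#         poll = models.ForeignKey(Poll)
+#
+#     choice.poll           # __get__: fetched via get_queryset(), cached
+#     choice.poll = poll    # __set__: copies poll.pk into choice.poll_id
+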
+
+class ForeignRelatedObjectsDescriptor(object):
+ # This class provides the functionality that makes the related-object
+ # managers available as attributes on a model class, for fields that have
+ # multiple "remote" values and have a ForeignKey pointed at them by
+ # some other model. In the example "poll.choice_set", the choice_set
+ # attribute is a ForeignRelatedObjectsDescriptor instance.
+ def __init__(self, related):
+ self.related = related # RelatedObject instance
+
+ def __get__(self, instance, instance_type=None):
+ if instance is None:
+ return self
+
+ return self.related_manager_cls(instance)
+
+ def __set__(self, instance, value):
+ manager = self.__get__(instance)
+ # If the foreign key can support nulls, then completely clear the related set.
+ # Otherwise, just move the named objects into the set.
+ if self.related.field.null:
+ manager.clear()
+ manager.add(*value)
+
+ @cached_property
+ def related_manager_cls(self):
+ # Dynamically create a class that subclasses the related model's default
+ # manager.
+ superclass = self.related.model._default_manager.__class__
+ rel_field = self.related.field
+ rel_model = self.related.model
+
+ class RelatedManager(superclass):
+ def __init__(self, instance):
+ super(RelatedManager, self).__init__()
+ self.instance = instance
+                self.core_filters = {'%s__exact' % rel_field.name: instance}
+ self.model = rel_model
+
+ def get_queryset(self):
+ try:
+ return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
+ except (AttributeError, KeyError):
+ db = self._db or router.db_for_read(self.model, instance=self.instance)
+ qs = super(RelatedManager, self).get_queryset().using(db).filter(**self.core_filters)
+ empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
+ for field in rel_field.foreign_related_fields:
+ val = getattr(self.instance, field.attname)
+ if val is None or (val == '' and empty_strings_as_null):
+ return qs.none()
+ qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
+ return qs
+
+ def get_prefetch_queryset(self, instances):
+ rel_obj_attr = rel_field.get_local_related_value
+ instance_attr = rel_field.get_foreign_related_value
+ instances_dict = dict((instance_attr(inst), inst) for inst in instances)
+ db = self._db or router.db_for_read(self.model, instance=instances[0])
+ query = {'%s__in' % rel_field.name: instances}
+ qs = super(RelatedManager, self).get_queryset().using(db).filter(**query)
+ # Since we just bypassed this class' get_queryset(), we must manage
+ # the reverse relation manually.
+ for rel_obj in qs:
+ instance = instances_dict[rel_obj_attr(rel_obj)]
+ setattr(rel_obj, rel_field.name, instance)
+ cache_name = rel_field.related_query_name()
+ return qs, rel_obj_attr, instance_attr, False, cache_name
+
+ def add(self, *objs):
+ for obj in objs:
+ if not isinstance(obj, self.model):
+ raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
+ setattr(obj, rel_field.name, self.instance)
+ obj.save()
+ add.alters_data = True
+
+ def create(self, **kwargs):
+ kwargs[rel_field.name] = self.instance
+ db = router.db_for_write(self.model, instance=self.instance)
+ return super(RelatedManager, self.db_manager(db)).create(**kwargs)
+ create.alters_data = True
+
+ def get_or_create(self, **kwargs):
+ # Update kwargs with the related object that this
+ # ForeignRelatedObjectsDescriptor knows about.
+ kwargs[rel_field.name] = self.instance
+ db = router.db_for_write(self.model, instance=self.instance)
+ return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
+ get_or_create.alters_data = True
+
+ # remove() and clear() are only provided if the ForeignKey can have a value of null.
+ if rel_field.null:
+ def remove(self, *objs):
+ val = rel_field.get_foreign_related_value(self.instance)
+ for obj in objs:
+ # Is obj actually part of this descriptor set?
+ if rel_field.get_local_related_value(obj) == val:
+ setattr(obj, rel_field.name, None)
+ obj.save()
+ else:
+ raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
+ remove.alters_data = True
+
+ def clear(self):
+ self.update(**{rel_field.name: None})
+ clear.alters_data = True
+
+ return RelatedManager
+
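+# A usage sketch for the reverse FK manager above (hypothetical models):
+#
+#     poll.choice_set.all()                 # filtered on poll's key
+#     poll.choice_set.add(choice)           # sets choice.poll, then saves
+#     poll.choice_set.create(text="Maybe")  # routed to the write database
+#     poll.choice_set.clear()               # only exposed when the FK is nullable
+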
+
+def create_many_related_manager(superclass, rel):
+ """Creates a manager that subclasses 'superclass' (which is a Manager)
+ and adds behavior for many-to-many related objects."""
+ class ManyRelatedManager(superclass):
+ def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
+ source_field_name=None, target_field_name=None, reverse=False,
+ through=None, prefetch_cache_name=None):
+ super(ManyRelatedManager, self).__init__()
+ self.model = model
+ self.query_field_name = query_field_name
+
+ source_field = through._meta.get_field(source_field_name)
+ source_related_fields = source_field.related_fields
+
+ self.core_filters = {}
+ for lh_field, rh_field in source_related_fields:
+ self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)
+
+ self.instance = instance
+ self.symmetrical = symmetrical
+ self.source_field = source_field
+ self.source_field_name = source_field_name
+ self.target_field_name = target_field_name
+ self.reverse = reverse
+ self.through = through
+ self.prefetch_cache_name = prefetch_cache_name
+ self.related_val = source_field.get_foreign_related_value(instance)
+            # Used for single-column, auto-created related models
+ self._fk_val = self.related_val[0]
+ if None in self.related_val:
+ raise ValueError('"%r" needs to have a value for field "%s" before '
+ 'this many-to-many relationship can be used.' %
+ (instance, source_field_name))
+            # Even if this relation is not to the pk, we still require the pk
+            # value. The expectation is that the instance has already been
+            # saved to the DB, although having a pk value isn't a guarantee of
+            # that.
+ if instance.pk is None:
+ raise ValueError("%r instance needs to have a primary key value before "
+ "a many-to-many relationship can be used." %
+ instance.__class__.__name__)
+
+ def _get_fk_val(self, obj, field_name):
+ """
+ Returns the correct value for this relationship's foreign key. This
+            might be something other than the pk value when to_field is used.
+ """
+ fk = self.through._meta.get_field(field_name)
+ if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname:
+ attname = fk.rel.get_related_field().get_attname()
+ return fk.get_prep_lookup('exact', getattr(obj, attname))
+ else:
+ return obj.pk
+
+ def get_queryset(self):
+ try:
+ return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
+ except (AttributeError, KeyError):
+ db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
+ return super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**self.core_filters)
+
+ def get_prefetch_queryset(self, instances):
+ instance = instances[0]
+ db = self._db or router.db_for_read(instance.__class__, instance=instance)
+ query = {'%s__in' % self.query_field_name: instances}
+ qs = super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**query)
+
+ # M2M: need to annotate the query in order to get the primary model
+ # that the secondary model was actually related to. We know that
+ # there will already be a join on the join table, so we can just add
+ # the select.
+
+ # For non-autocreated 'through' models, can't assume we are
+ # dealing with PK values.
+ fk = self.through._meta.get_field(self.source_field_name)
+ join_table = self.through._meta.db_table
+ connection = connections[db]
+ qn = connection.ops.quote_name
+ qs = qs.extra(select=dict(
+ ('_prefetch_related_val_%s' % f.attname,
+ '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
+ return (qs,
+ lambda result: tuple([getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields]),
+ lambda inst: tuple([getattr(inst, f.attname) for f in fk.foreign_related_fields]),
+ False,
+ self.prefetch_cache_name)
+
+ # If the ManyToMany relation has an intermediary model,
+ # the add and remove methods do not exist.
+ if rel.through._meta.auto_created:
+ def add(self, *objs):
+ self._add_items(self.source_field_name, self.target_field_name, *objs)
+
+ # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
+ if self.symmetrical:
+ self._add_items(self.target_field_name, self.source_field_name, *objs)
+ add.alters_data = True
+
+ def remove(self, *objs):
+ self._remove_items(self.source_field_name, self.target_field_name, *objs)
+
+ # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
+ if self.symmetrical:
+ self._remove_items(self.target_field_name, self.source_field_name, *objs)
+ remove.alters_data = True
+
+ def clear(self):
+ self._clear_items(self.source_field_name)
+
+ # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
+ if self.symmetrical:
+ self._clear_items(self.target_field_name)
+ clear.alters_data = True
+
+ def create(self, **kwargs):
+ # This check needs to be done here, since we can't later remove this
+ # from the method lookup table, as we do with add and remove.
+ if not self.through._meta.auto_created:
+ opts = self.through._meta
+ raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
+ db = router.db_for_write(self.instance.__class__, instance=self.instance)
+ new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
+ self.add(new_obj)
+ return new_obj
+ create.alters_data = True
+
+ def get_or_create(self, **kwargs):
+ db = router.db_for_write(self.instance.__class__, instance=self.instance)
+ obj, created = \
+ super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
+ # We only need to add() if created because if we got an object back
+ # from get() then the relationship already exists.
+ if created:
+ self.add(obj)
+ return obj, created
+ get_or_create.alters_data = True
+
+ def _add_items(self, source_field_name, target_field_name, *objs):
+ # source_field_name: the PK fieldname in join table for the source object
+ # target_field_name: the PK fieldname in join table for the target object
+ # *objs - objects to add. Either object instances, or primary keys of object instances.
+
+ # If there aren't any objects, there is nothing to do.
+ from django.db.models import Model
+ if objs:
+ new_ids = set()
+ for obj in objs:
+ if isinstance(obj, self.model):
+ if not router.allow_relation(obj, self.instance):
+ raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
+ (obj, self.instance._state.db, obj._state.db))
+ fk_val = self._get_fk_val(obj, target_field_name)
+ if fk_val is None:
+ raise ValueError('Cannot add "%r": the value for field "%s" is None' %
+ (obj, target_field_name))
+ new_ids.add(self._get_fk_val(obj, target_field_name))
+ elif isinstance(obj, Model):
+ raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
+ else:
+ new_ids.add(obj)
+ db = router.db_for_write(self.through, instance=self.instance)
+ vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
+ vals = vals.filter(**{
+ source_field_name: self._fk_val,
+ '%s__in' % target_field_name: new_ids,
+ })
+ new_ids = new_ids - set(vals)
+
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are inserting the
+ # duplicate data row for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action='pre_add',
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=new_ids, using=db)
+ # Add the ones that aren't there already
+ self.through._default_manager.using(db).bulk_create([
+ self.through(**{
+ '%s_id' % source_field_name: self._fk_val,
+ '%s_id' % target_field_name: obj_id,
+ })
+ for obj_id in new_ids
+ ])
+
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are inserting the
+ # duplicate data row for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action='post_add',
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=new_ids, using=db)
+
+ def _remove_items(self, source_field_name, target_field_name, *objs):
+ # source_field_name: the PK colname in join table for the source object
+ # target_field_name: the PK colname in join table for the target object
+ # *objs - objects to remove
+
+ # If there aren't any objects, there is nothing to do.
+ if objs:
+ # Check that all the objects are of the right type
+ old_ids = set()
+ for obj in objs:
+ if isinstance(obj, self.model):
+ old_ids.add(self._get_fk_val(obj, target_field_name))
+ else:
+ old_ids.add(obj)
+ # Work out what DB we're operating on
+ db = router.db_for_write(self.through, instance=self.instance)
+ # Send a signal to the other end if need be.
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are deleting the
+ # duplicate data row for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action="pre_remove",
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=old_ids, using=db)
+ # Remove the specified objects from the join table
+ self.through._default_manager.using(db).filter(**{
+ source_field_name: self._fk_val,
+ '%s__in' % target_field_name: old_ids
+ }).delete()
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are deleting the
+ # duplicate data row for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action="post_remove",
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=old_ids, using=db)
+
+ def _clear_items(self, source_field_name):
+ db = router.db_for_write(self.through, instance=self.instance)
+ # source_field_name: the PK colname in join table for the source object
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are clearing the
+ # duplicate data rows for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action="pre_clear",
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=None, using=db)
+ self.through._default_manager.using(db).filter(**{
+ source_field_name: self.related_val
+ }).delete()
+ if self.reverse or source_field_name == self.source_field_name:
+ # Don't send the signal when we are clearing the
+ # duplicate data rows for symmetrical reverse entries.
+ signals.m2m_changed.send(sender=self.through, action="post_clear",
+ instance=self.instance, reverse=self.reverse,
+ model=self.model, pk_set=None, using=db)
+
+ return ManyRelatedManager
+
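+# A usage sketch for ManyRelatedManager (hypothetical models with an
+# auto-created through table):
+#
+#     article.publications.add(p1, p2)  # inserts rows into the join table
+#     article.publications.remove(p1)   # deletes the matching join rows
+#     article.publications.clear()      # deletes all of article's join rows
+#
+# With a manually specified through model, add()/remove()/clear() are not
+# exposed and join rows are managed via the through model itself.
+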
+
+class ManyRelatedObjectsDescriptor(object):
+ # This class provides the functionality that makes the related-object
+ # managers available as attributes on a model class, for fields that have
+ # multiple "remote" values and have a ManyToManyField pointed at them by
+ # some other model (rather than having a ManyToManyField themselves).
+ # In the example "publication.article_set", the article_set attribute is a
+ # ManyRelatedObjectsDescriptor instance.
+ def __init__(self, related):
+ self.related = related # RelatedObject instance
+
+ @cached_property
+ def related_manager_cls(self):
+ # Dynamically create a class that subclasses the related
+ # model's default manager.
+ return create_many_related_manager(
+ self.related.model._default_manager.__class__,
+ self.related.field.rel
+ )
+
+ def __get__(self, instance, instance_type=None):
+ if instance is None:
+ return self
+
+ rel_model = self.related.model
+
+ manager = self.related_manager_cls(
+ model=rel_model,
+ query_field_name=self.related.field.name,
+ prefetch_cache_name=self.related.field.related_query_name(),
+ instance=instance,
+ symmetrical=False,
+ source_field_name=self.related.field.m2m_reverse_field_name(),
+ target_field_name=self.related.field.m2m_field_name(),
+ reverse=True,
+ through=self.related.field.rel.through,
+ )
+
+ return manager
+
+ def __set__(self, instance, value):
+ if not self.related.field.rel.through._meta.auto_created:
+ opts = self.related.field.rel.through._meta
+ raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
+
+ manager = self.__get__(instance)
+ manager.clear()
+ manager.add(*value)
+
+
+class ReverseManyRelatedObjectsDescriptor(object):
+ # This class provides the functionality that makes the related-object
+ # managers available as attributes on a model class, for fields that have
+ # multiple "remote" values and have a ManyToManyField defined in their
+ # model (rather than having another model pointed *at* them).
+ # In the example "article.publications", the publications attribute is a
+ # ReverseManyRelatedObjectsDescriptor instance.
+ def __init__(self, m2m_field):
+ self.field = m2m_field
+
+ @property
+ def through(self):
+ # through is provided so that you have easy access to the through
+ # model (Book.authors.through) for inlines, etc. This is done as
+ # a property to ensure that the fully resolved value is returned.
+ return self.field.rel.through
+
+ @cached_property
+ def related_manager_cls(self):
+ # Dynamically create a class that subclasses the related model's
+ # default manager.
+ return create_many_related_manager(
+ self.field.rel.to._default_manager.__class__,
+ self.field.rel
+ )
+
+ def __get__(self, instance, instance_type=None):
+ if instance is None:
+ return self
+
+ manager = self.related_manager_cls(
+ model=self.field.rel.to,
+ query_field_name=self.field.related_query_name(),
+ prefetch_cache_name=self.field.name,
+ instance=instance,
+ symmetrical=self.field.rel.symmetrical,
+ source_field_name=self.field.m2m_field_name(),
+ target_field_name=self.field.m2m_reverse_field_name(),
+ reverse=False,
+ through=self.field.rel.through,
+ )
+
+ return manager
+
+ def __set__(self, instance, value):
+ if not self.field.rel.through._meta.auto_created:
+ opts = self.field.rel.through._meta
+ raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
+
+ manager = self.__get__(instance)
+        # clear() can change the expected contents of the 'value' queryset, so
+        # we force evaluation of the queryset before clear(); refs ticket #19816.
+ value = tuple(value)
+ manager.clear()
+ manager.add(*value)
+
+
+class ForeignObjectRel(object):
+ def __init__(self, field, to, related_name=None, limit_choices_to=None,
+ parent_link=False, on_delete=None, related_query_name=None):
+ try:
+ to._meta
+ except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
+ assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
+
+ self.field = field
+ self.to = to
+ self.related_name = related_name
+ self.related_query_name = related_query_name
+ self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
+ self.multiple = True
+ self.parent_link = parent_link
+ self.on_delete = on_delete
+
+ def is_hidden(self):
+ "Should the related object be hidden?"
+ return self.related_name and self.related_name[-1] == '+'
+
+ def get_joining_columns(self):
+ return self.field.get_reverse_joining_columns()
+
+ def get_extra_restriction(self, where_class, alias, related_alias):
+ return self.field.get_extra_restriction(where_class, related_alias, alias)
+
+ def set_field_name(self):
+ """
+        Sets the related field's name. This is not available until later
+        stages of app loading, so set_field_name is called from
+        set_attributes_from_rel().
+ """
+ # By default foreign object doesn't relate to any remote field (for
+ # example custom multicolumn joins currently have no remote field).
+ self.field_name = None
+
+
+class ManyToOneRel(ForeignObjectRel):
+ def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
+ parent_link=False, on_delete=None, related_query_name=None):
+ super(ManyToOneRel, self).__init__(
+ field, to, related_name=related_name, limit_choices_to=limit_choices_to,
+ parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name)
+ self.field_name = field_name
+
+ def get_related_field(self):
+ """
+ Returns the Field in the 'to' object to which this relationship is
+ tied.
+ """
+ data = self.to._meta.get_field_by_name(self.field_name)
+ if not data[2]:
+ raise FieldDoesNotExist("No related field named '%s'" %
+ self.field_name)
+ return data[0]
+
+ def set_field_name(self):
+ self.field_name = self.field_name or self.to._meta.pk.name
+
+
+class OneToOneRel(ManyToOneRel):
+ def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
+ parent_link=False, on_delete=None, related_query_name=None):
+ super(OneToOneRel, self).__init__(field, to, field_name,
+ related_name=related_name, limit_choices_to=limit_choices_to,
+ parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name,
+ )
+ self.multiple = False
+
+
+class ManyToManyRel(object):
+ def __init__(self, to, related_name=None, limit_choices_to=None,
+ symmetrical=True, through=None, db_constraint=True, related_query_name=None):
+ if through and not db_constraint:
+ raise ValueError("Can't supply a through model and db_constraint=False")
+ self.to = to
+ self.related_name = related_name
+ self.related_query_name = related_query_name
+ if limit_choices_to is None:
+ limit_choices_to = {}
+ self.limit_choices_to = limit_choices_to
+ self.symmetrical = symmetrical
+ self.multiple = True
+ self.through = through
+ self.db_constraint = db_constraint
+
+ def is_hidden(self):
+ "Should the related object be hidden?"
+ return self.related_name and self.related_name[-1] == '+'
+
+ def get_related_field(self):
+ """
+        Returns the field in the 'to' object to which this relationship is tied
+ (this is always the primary key on the target model). Provided for
+ symmetry with ManyToOneRel.
+ """
+ return self.to._meta.pk
+
+
+class ForeignObject(RelatedField):
+ requires_unique_target = True
+ generate_reverse_relation = True
+
+ def __init__(self, to, from_fields, to_fields, **kwargs):
+ self.from_fields = from_fields
+ self.to_fields = to_fields
+
+ if 'rel' not in kwargs:
+ kwargs['rel'] = ForeignObjectRel(
+ self, to,
+ related_name=kwargs.pop('related_name', None),
+ related_query_name=kwargs.pop('related_query_name', None),
+ limit_choices_to=kwargs.pop('limit_choices_to', None),
+ parent_link=kwargs.pop('parent_link', False),
+ on_delete=kwargs.pop('on_delete', CASCADE),
+ )
+ kwargs['verbose_name'] = kwargs.get('verbose_name', None)
+
+ super(ForeignObject, self).__init__(**kwargs)
+
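+    # A hypothetical composite-join sketch (names are illustrative only):
+    #
+    #     class Membership(models.Model):
+    #         person_id = models.IntegerField()
+    #         group_id = models.IntegerField()
+    #         person = ForeignObject(Person,
+    #                                from_fields=['person_id', 'group_id'],
+    #                                to_fields=['id', 'group_id'])
+    #
+    # resolve_related_fields() then pairs person_id with Person.id and
+    # group_id with Person.group_id, joining on both columns at once.
+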
+ def resolve_related_fields(self):
+ if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
+            raise ValueError('Foreign Object from_fields and to_fields must be of the same non-zero length')
+ related_fields = []
+ for index in range(len(self.from_fields)):
+ from_field_name = self.from_fields[index]
+ to_field_name = self.to_fields[index]
+ from_field = (self if from_field_name == 'self'
+ else self.opts.get_field_by_name(from_field_name)[0])
+ to_field = (self.rel.to._meta.pk if to_field_name is None
+ else self.rel.to._meta.get_field_by_name(to_field_name)[0])
+ related_fields.append((from_field, to_field))
+ return related_fields
+
+ @property
+ def related_fields(self):
+ if not hasattr(self, '_related_fields'):
+ self._related_fields = self.resolve_related_fields()
+ return self._related_fields
+
+ @property
+ def reverse_related_fields(self):
+ return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
+
+ @property
+ def local_related_fields(self):
+ return tuple([lhs_field for lhs_field, rhs_field in self.related_fields])
+
+ @property
+ def foreign_related_fields(self):
+ return tuple([rhs_field for lhs_field, rhs_field in self.related_fields])
+
+ def get_local_related_value(self, instance):
+ return self.get_instance_value_for_fields(instance, self.local_related_fields)
+
+ def get_foreign_related_value(self, instance):
+ return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
+
+ @staticmethod
+ def get_instance_value_for_fields(instance, fields):
+ ret = []
+ for field in fields:
+ # Gotcha: in some cases (like fixture loading) a model can have
+ # different values in parent_ptr_id and parent's id. So, use
+ # instance.pk (that is, parent_ptr_id) when asked for instance.id.
+ if field.primary_key:
+ ret.append(instance.pk)
+ else:
+ ret.append(getattr(instance, field.attname))
+ return tuple(ret)
+
+ def get_attname_column(self):
+ attname, column = super(ForeignObject, self).get_attname_column()
+ return attname, None
+
+ def get_joining_columns(self, reverse_join=False):
+ source = self.reverse_related_fields if reverse_join else self.related_fields
+ return tuple([(lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source])
+
+ def get_reverse_joining_columns(self):
+ return self.get_joining_columns(reverse_join=True)
+
+ def get_extra_descriptor_filter(self, instance):
+ """
+        Returns an extra filter condition for related object fetching when
+        the user accesses 'instance.fieldname'; that is, the extra filter is
+        used in the field's descriptor.
+
+ The filter should be either a dict usable in .filter(**kwargs) call or
+ a Q-object. The condition will be ANDed together with the relation's
+ joining columns.
+
+ A parallel method is get_extra_restriction() which is used in
+ JOIN and subquery conditions.
+ """
+ return {}
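+
+    # For example, a hypothetical subclass could return {'is_active': True}
+    # from get_extra_descriptor_filter() to AND an extra predicate onto
+    # every instance.fieldname fetch made through this field's descriptor.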
+
+ def get_extra_restriction(self, where_class, alias, related_alias):
+ """
+        Returns a pair condition used for joining and subquery pushdown. The
+        condition is something that responds to the as_sql(qn, connection)
+        method.
+
+        Note that currently referring to both the 'alias' and 'related_alias'
+        will not work in some conditions, like subquery pushdown.
+
+ A parallel method is get_extra_descriptor_filter() which is used in
+ instance.fieldname related object fetching.
+ """
+ return None
+
+ def get_path_info(self):
+ """
+ Get path from this field to the related model.
+ """
+ opts = self.rel.to._meta
+ from_opts = self.model._meta
+ return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
+
+ def get_reverse_path_info(self):
+ """
+ Get path from the related model to this field's model.
+ """
+ opts = self.model._meta
+ from_opts = self.rel.to._meta
+ pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
+ return pathinfos
+
+ def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
+ raw_value):
+ from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR
+ root_constraint = constraint_class()
+ assert len(targets) == len(sources)
+
+ def get_normalized_value(value):
+ from django.db.models import Model
+ if isinstance(value, Model):
+ value_list = []
+ for source in sources:
+ # Account for one-to-one relations when sent a different model
+ while not isinstance(value, source.model) and source.rel:
+ source = source.rel.to._meta.get_field(source.rel.field_name)
+ value_list.append(getattr(value, source.attname))
+ return tuple(value_list)
+ elif not isinstance(value, tuple):
+ return (value,)
+ return value
+
+ is_multicolumn = len(self.related_fields) > 1
+ if (hasattr(raw_value, '_as_sql') or
+ hasattr(raw_value, 'get_compiler')):
+ root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
+ [source.name for source in sources], raw_value),
+ AND)
+ elif lookup_type == 'isnull':
+ root_constraint.add(
+ (Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND)
+ elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
+ and not is_multicolumn)):
+ value = get_normalized_value(raw_value)
+ for index, source in enumerate(sources):
+ root_constraint.add(
+ (Constraint(alias, targets[index].column, sources[index]), lookup_type,
+ value[index]), AND)
+ elif lookup_type in ['range', 'in'] and not is_multicolumn:
+ values = [get_normalized_value(value) for value in raw_value]
+ value = [val[0] for val in values]
+ root_constraint.add(
+ (Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND)
+ elif lookup_type == 'in':
+ values = [get_normalized_value(value) for value in raw_value]
+ for value in values:
+ value_constraint = constraint_class()
+ for index, target in enumerate(targets):
+ value_constraint.add(
+ (Constraint(alias, target.column, sources[index]), 'exact', value[index]),
+ AND)
+ root_constraint.add(value_constraint, OR)
+ else:
+ raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
+ return root_constraint
+
+ @property
+ def attnames(self):
+ return tuple([field.attname for field in self.local_related_fields])
+
+ def get_defaults(self):
+ return tuple([field.get_default() for field in self.local_related_fields])
+
+ def contribute_to_class(self, cls, name, virtual_only=False):
+ super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
+ setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
+
+ def contribute_to_related_class(self, cls, related):
+ # Internal FK's - i.e., those with a related name ending with '+' -
+ # and swapped models don't get a related descriptor.
+ if not self.rel.is_hidden() and not related.model._meta.swapped:
+ setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
+ if self.rel.limit_choices_to:
+ cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
+
+
+class ForeignKey(ForeignObject):
+ empty_strings_allowed = False
+ default_error_messages = {
+ 'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
+ }
+ description = _("Foreign Key (type determined by related field)")
+
+ def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
+ db_constraint=True, **kwargs):
+ try:
+ to_name = to._meta.object_name.lower()
+ except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
+ assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
+ else:
+ assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
+ # For backwards compatibility purposes, we need to *try* and set
+ # the to_field during FK construction. It won't be guaranteed to
+ # be correct until contribute_to_class is called. Refs #12190.
+ to_field = to_field or (to._meta.pk and to._meta.pk.name)
+
+ if 'db_index' not in kwargs:
+ kwargs['db_index'] = True
+
+ self.db_constraint = db_constraint
+
+ kwargs['rel'] = rel_class(
+ self, to, to_field,
+ related_name=kwargs.pop('related_name', None),
+ related_query_name=kwargs.pop('related_query_name', None),
+ limit_choices_to=kwargs.pop('limit_choices_to', None),
+ parent_link=kwargs.pop('parent_link', False),
+ on_delete=kwargs.pop('on_delete', CASCADE),
+ )
+ super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)
+
+ @property
+ def related_field(self):
+ return self.foreign_related_fields[0]
+
+ def get_reverse_path_info(self):
+ """
+ Get path from the related model to this field's model.
+ """
+ opts = self.model._meta
+ from_opts = self.rel.to._meta
+ pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
+ return pathinfos
+
+ def validate(self, value, model_instance):
+ if self.rel.parent_link:
+ return
+ super(ForeignKey, self).validate(value, model_instance)
+ if value is None:
+ return
+
+ using = router.db_for_read(model_instance.__class__, instance=model_instance)
+ qs = self.rel.to._default_manager.using(using).filter(
+ **{self.rel.field_name: value}
+ )
+ qs = qs.complex_filter(self.rel.limit_choices_to)
+ if not qs.exists():
+ raise exceptions.ValidationError(
+ self.error_messages['invalid'],
+ code='invalid',
+ params={'model': self.rel.to._meta.verbose_name, 'pk': value},
+ )
+
+ def get_attname(self):
+ return '%s_id' % self.name
+
+ def get_attname_column(self):
+ attname = self.get_attname()
+ column = self.db_column or attname
+ return attname, column
+
+ def get_validator_unique_lookup_type(self):
+ return '%s__%s__exact' % (self.name, self.related_field.name)
+
+ def get_default(self):
+ "Here we check if the default value is an object and return the to_field if so."
+ field_default = super(ForeignKey, self).get_default()
+ if isinstance(field_default, self.rel.to):
+ return getattr(field_default, self.related_field.attname)
+ return field_default
+
+ def get_db_prep_save(self, value, connection):
+        if value == '' or value is None:
+ return None
+ else:
+ return self.related_field.get_db_prep_save(value,
+ connection=connection)
+
+ def value_to_string(self, obj):
+ if not obj:
+ # In required many-to-one fields with only one available choice,
+ # select that one available choice. Note: For SelectFields
+ # we have to check that the length of choices is *2*, not 1,
+ # because SelectFields always have an initial "blank" value.
+ if not self.blank and self.choices:
+ choice_list = self.get_choices_default()
+ if len(choice_list) == 2:
+ return smart_text(choice_list[1][0])
+ return super(ForeignKey, self).value_to_string(obj)
+
+ def contribute_to_related_class(self, cls, related):
+ super(ForeignKey, self).contribute_to_related_class(cls, related)
+ if self.rel.field_name is None:
+ self.rel.field_name = cls._meta.pk.name
+
+ def formfield(self, **kwargs):
+ db = kwargs.pop('using', None)
+ if isinstance(self.rel.to, six.string_types):
+ raise ValueError("Cannot create form field for %r yet, because "
+ "its related model %r has not been loaded yet" %
+ (self.name, self.rel.to))
+ defaults = {
+ 'form_class': forms.ModelChoiceField,
+ 'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
+ 'to_field_name': self.rel.field_name,
+ }
+ defaults.update(kwargs)
+ return super(ForeignKey, self).formfield(**defaults)
+
+ def db_type(self, connection):
+ # The database column type of a ForeignKey is the column type
+ # of the field to which it points. An exception is if the ForeignKey
+ # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
+ # in which case the column type is simply that of an IntegerField.
+        # If the database needs similar types for key fields however, the only
+        # thing we can do is make the AutoField an IntegerField.
+ rel_field = self.related_field
+ if (isinstance(rel_field, AutoField) or
+ (not connection.features.related_fields_match_type and
+ isinstance(rel_field, (PositiveIntegerField,
+ PositiveSmallIntegerField)))):
+ return IntegerField().db_type(connection=connection)
+ return rel_field.db_type(connection=connection)
+
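+# For example: a ForeignKey to a model whose pk is an AutoField is given a
+# plain integer column, while a ForeignKey pointing at a CharField primary
+# key inherits that field's varchar column type.
+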
+
+class OneToOneField(ForeignKey):
+ """
+    A OneToOneField is essentially the same as a ForeignKey, with the
+    exception that it always carries a "unique" constraint with it and the
+    reverse relation always returns the object pointed to (since there will
+    only ever be one), rather than returning a list.
+ """
+ description = _("One-to-one relationship")
+
+ def __init__(self, to, to_field=None, **kwargs):
+ kwargs['unique'] = True
+ super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
+
+ def contribute_to_related_class(self, cls, related):
+ setattr(cls, related.get_accessor_name(),
+ SingleRelatedObjectDescriptor(related))
+
+ def formfield(self, **kwargs):
+ if self.rel.parent_link:
+ return None
+ return super(OneToOneField, self).formfield(**kwargs)
+
+ def save_form_data(self, instance, data):
+ if isinstance(data, self.rel.to):
+ setattr(instance, self.name, data)
+ else:
+ setattr(instance, self.attname, data)
+
+
+def create_many_to_many_intermediary_model(field, klass):
+ from django.db import models
+ managed = True
+ if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
+ to_model = field.rel.to
+ to = to_model.split('.')[-1]
+
+ def set_managed(field, model, cls):
+ field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
+ add_lazy_relation(klass, field, to_model, set_managed)
+ elif isinstance(field.rel.to, six.string_types):
+ to = klass._meta.object_name
+ to_model = klass
+ managed = klass._meta.managed
+ else:
+ to = field.rel.to._meta.object_name
+ to_model = field.rel.to
+ managed = klass._meta.managed or to_model._meta.managed
+ name = '%s_%s' % (klass._meta.object_name, field.name)
+ if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
+ from_ = 'from_%s' % to.lower()
+ to = 'to_%s' % to.lower()
+ else:
+ from_ = klass._meta.model_name
+ to = to.lower()
+ meta = type('Meta', (object,), {
+ 'db_table': field._get_m2m_db_table(klass._meta),
+ 'managed': managed,
+ 'auto_created': klass,
+ 'app_label': klass._meta.app_label,
+ 'db_tablespace': klass._meta.db_tablespace,
+ 'unique_together': (from_, to),
+ 'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
+ 'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
+ })
+ # Construct and return the new class.
+ return type(str(name), (models.Model,), {
+ 'Meta': meta,
+ '__module__': klass.__module__,
+ from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
+ to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
+ })
+
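+# For example (hypothetical): a ManyToManyField named "publications" on an
+# Article model auto-creates a hidden "Article_publications" model holding
+# an "article" FK and a "publication" FK (unique together), backed by the
+# "<article_db_table>_publications" join table.
+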
+
+class ManyToManyField(RelatedField):
+ description = _("Many-to-many relationship")
+
+ def __init__(self, to, db_constraint=True, **kwargs):
+ try:
+ assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
+ except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
+ assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
+ # Python 2.6 and earlier require dictionary keys to be of str type,
+ # not unicode and class names must be ASCII (in Python 2.x), so we
+ # forcibly coerce it here (breaks early if there's a problem).
+ to = str(to)
+
+ kwargs['verbose_name'] = kwargs.get('verbose_name', None)
+ kwargs['rel'] = ManyToManyRel(to,
+ related_name=kwargs.pop('related_name', None),
+ related_query_name=kwargs.pop('related_query_name', None),
+ limit_choices_to=kwargs.pop('limit_choices_to', None),
+ symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
+ through=kwargs.pop('through', None),
+ db_constraint=db_constraint,
+ )
+
+ self.db_table = kwargs.pop('db_table', None)
+ if kwargs['rel'].through is not None:
+ assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
+
+ super(ManyToManyField, self).__init__(**kwargs)
+
+ def _get_path_info(self, direct=False):
+ """
+        Called by both direct and indirect m2m traversal.
+ """
+ pathinfos = []
+ int_model = self.rel.through
+ linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
+ linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
+ if direct:
+ join1infos = linkfield1.get_reverse_path_info()
+ join2infos = linkfield2.get_path_info()
+ else:
+ join1infos = linkfield2.get_reverse_path_info()
+ join2infos = linkfield1.get_path_info()
+ pathinfos.extend(join1infos)
+ pathinfos.extend(join2infos)
+ return pathinfos
+
+ def get_path_info(self):
+ return self._get_path_info(direct=True)
+
+ def get_reverse_path_info(self):
+ return self._get_path_info(direct=False)
+
+ def get_choices_default(self):
+ return Field.get_choices(self, include_blank=False)
+
+ def _get_m2m_db_table(self, opts):
+ "Function that can be curried to provide the m2m table name for this relation"
+ if self.rel.through is not None:
+ return self.rel.through._meta.db_table
+ elif self.db_table:
+ return self.db_table
+ else:
+ return util.truncate_name('%s_%s' % (opts.db_table, self.name),
+ connection.ops.max_name_length())
+
+ def _get_m2m_attr(self, related, attr):
+ "Function that can be curried to provide the source accessor or DB column name for the m2m table"
+ cache_attr = '_m2m_%s_cache' % attr
+ if hasattr(self, cache_attr):
+ return getattr(self, cache_attr)
+ for f in self.rel.through._meta.fields:
+ if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
+ setattr(self, cache_attr, getattr(f, attr))
+ return getattr(self, cache_attr)
+
+ def _get_m2m_reverse_attr(self, related, attr):
+ "Function that can be curried to provide the related accessor or DB column name for the m2m table"
+ cache_attr = '_m2m_reverse_%s_cache' % attr
+ if hasattr(self, cache_attr):
+ return getattr(self, cache_attr)
+ found = False
+ for f in self.rel.through._meta.fields:
+ if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
+ if related.model == related.parent_model:
+ # If this is an m2m-intermediate to self,
+ # the first foreign key you find will be
+ # the source column. Keep searching for
+ # the second foreign key.
+ if found:
+ setattr(self, cache_attr, getattr(f, attr))
+ break
+ else:
+ found = True
+ else:
+ setattr(self, cache_attr, getattr(f, attr))
+ break
+ return getattr(self, cache_attr)
+
+ def value_to_string(self, obj):
+ data = ''
+ if obj:
+ qs = getattr(obj, self.name).all()
+ data = [instance._get_pk_val() for instance in qs]
+ else:
+ # In required many-to-many fields with only one available choice,
+ # select that one available choice.
+ if not self.blank:
+ choices_list = self.get_choices_default()
+ if len(choices_list) == 1:
+ data = [choices_list[0][0]]
+ return smart_text(data)
+
+ def contribute_to_class(self, cls, name):
+ # To support multiple relations to self, it's useful to have a non-None
+ # related name on symmetrical relations for internal reasons. The
+ # concept doesn't make a lot of sense externally ("you want me to
+ # specify *what* on my non-reversible relation?!"), so we set it up
+ # automatically. The funky name reduces the chance of an accidental
+ # clash.
+ if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
+ self.rel.related_name = "%s_rel_+" % name
+
+ super(ManyToManyField, self).contribute_to_class(cls, name)
+
+ # The intermediate m2m model is not auto created if:
+ # 1) There is a manually specified intermediate, or
+ # 2) The class owning the m2m field is abstract.
+ # 3) The class owning the m2m field has been swapped out.
+ if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
+ self.rel.through = create_many_to_many_intermediary_model(self, cls)
+
+ # Add the descriptor for the m2m relation
+ setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
+
+ # Set up the accessor for the m2m table name for the relation
+ self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
+
+ # Populate some necessary rel arguments so that cross-app relations
+ # work correctly.
+ if isinstance(self.rel.through, six.string_types):
+ def resolve_through_model(field, model, cls):
+ field.rel.through = model
+ add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
+
+ def contribute_to_related_class(self, cls, related):
+ # Internal M2Ms (i.e., those with a related name ending with '+')
+ # and swapped models don't get a related descriptor.
+ if not self.rel.is_hidden() and not related.model._meta.swapped:
+ setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
+
+ # Set up the accessors for the column names on the m2m table
+ self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
+ self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
+
+ self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
+ self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
+
+ get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
+ self.m2m_target_field_name = lambda: get_m2m_rel().field_name
+ get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
+ self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
+
+ def set_attributes_from_rel(self):
+ pass
+
+ def value_from_object(self, obj):
+ "Returns the value of this field in the given model instance."
+ return getattr(obj, self.attname).all()
+
+ def save_form_data(self, instance, data):
+ setattr(instance, self.attname, data)
+
+ def formfield(self, **kwargs):
+ db = kwargs.pop('using', None)
+ defaults = {
+ 'form_class': forms.ModelMultipleChoiceField,
+ 'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
+ }
+ defaults.update(kwargs)
+ # If initial is passed in, it's a list of related objects, but the
+ # MultipleChoiceField takes a list of IDs.
+ if defaults.get('initial') is not None:
+ initial = defaults['initial']
+ if callable(initial):
+ initial = initial()
+ defaults['initial'] = [i._get_pk_val() for i in initial]
+ return super(ManyToManyField, self).formfield(**defaults)
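+
+# A minimal usage sketch (Pizza and Topping are illustrative models, not part
+# of this module): formfield() above converts the initial related objects
+# into a list of primary keys for forms.ModelMultipleChoiceField:
+#
+#     class Topping(models.Model):
+#         name = models.CharField(max_length=50)
+#
+#     class Pizza(models.Model):
+#         toppings = models.ManyToManyField(Topping)
+#
+#     field = Pizza._meta.get_field('toppings')
+#     form_field = field.formfield(initial=Topping.objects.all())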
diff --git a/lib/python2.7/site-packages/django/db/models/fields/subclassing.py b/lib/python2.7/site-packages/django/db/models/fields/subclassing.py
new file mode 100644
index 0000000..e6153ae
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/subclassing.py
@@ -0,0 +1,53 @@
+"""
+Convenience routines for creating non-trivial Field subclasses, as well as
+backwards compatibility utilities.
+
+Add SubfieldBase as the metaclass for your Field subclass, implement
+to_python() and the other necessary methods and everything will work
+seamlessly.
+"""
+
+class SubfieldBase(type):
+ """
+ A metaclass for custom Field subclasses. This ensures the model's attribute
+ has the descriptor protocol attached to it.
+ """
+ def __new__(cls, name, bases, attrs):
+ new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
+ new_class.contribute_to_class = make_contrib(
+ new_class, attrs.get('contribute_to_class')
+ )
+ return new_class
+
+class Creator(object):
+ """
+ A placeholder class that provides a way to set the attribute on the model.
+ """
+ def __init__(self, field):
+ self.field = field
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ raise AttributeError('Can only be accessed via an instance.')
+ return obj.__dict__[self.field.name]
+
+ def __set__(self, obj, value):
+ obj.__dict__[self.field.name] = self.field.to_python(value)
+
+def make_contrib(superclass, func=None):
+ """
+ Returns a suitable contribute_to_class() method for the Field subclass.
+
+ If 'func' is passed in, it is the existing contribute_to_class() method on
+ the subclass and it is called before anything else. It is assumed in this
+ case that the existing contribute_to_class() calls all the necessary
+ superclass methods.
+ """
+ def contribute_to_class(self, cls, name):
+ if func:
+ func(self, cls, name)
+ else:
+ super(superclass, self).contribute_to_class(cls, name)
+ setattr(cls, self.name, Creator(self))
+
+ return contribute_to_class
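+
+# A minimal sketch of the pattern this module supports (HandField and
+# parse_hand() are hypothetical names, used only for illustration):
+#
+#     from django.db import models
+#     from django.utils import six
+#
+#     class HandField(six.with_metaclass(SubfieldBase, models.Field)):
+#         def to_python(self, value):
+#             # Invoked on every attribute assignment via Creator.__set__.
+#             return parse_hand(value)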
diff --git a/lib/python2.7/site-packages/django/db/models/loading.py b/lib/python2.7/site-packages/django/db/models/loading.py
new file mode 100644
index 0000000..bb87728
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/loading.py
@@ -0,0 +1,327 @@
+"Utilities for loading models and the modules that contain them."
+
+from django.conf import settings
+from django.core.exceptions import ImproperlyConfigured
+from django.utils.datastructures import SortedDict
+from django.utils.importlib import import_module
+from django.utils.module_loading import module_has_submodule
+from django.utils._os import upath
+from django.utils import six
+
+import imp
+import sys
+import os
+
+__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models',
+ 'load_app', 'app_cache_ready')
+
+class UnavailableApp(Exception):
+ pass
+
+class AppCache(object):
+ """
+ A cache that stores installed applications and their models. Used to
+ provide reverse-relations and for app introspection (e.g. admin).
+ """
+ # Use the Borg pattern to share state between all instances. Details at
+ # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
+ __shared_state = dict(
+ # Keys of app_store are the model modules for each application.
+ app_store=SortedDict(),
+
+ # Mapping of installed app_labels to model modules for that app.
+ app_labels={},
+
+ # Mapping of app_labels to a dictionary of model names to model code.
+ # May contain apps that are not installed.
+ app_models=SortedDict(),
+
+ # Mapping of app_labels to errors raised when trying to import the app.
+ app_errors={},
+
+ # -- Everything below here is only used when populating the cache --
+ loaded=False,
+ handled=set(),
+ postponed=[],
+ nesting_level=0,
+ _get_models_cache={},
+ available_apps=None,
+ )
+
+ def __init__(self):
+ self.__dict__ = self.__shared_state
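+
+    # Because __init__ assigns the shared dict above (Borg pattern), every
+    # AppCache() instance sees the same state. For example:
+    #
+    #     a, b = AppCache(), AppCache()
+    #     a.nesting_level += 1
+    #     assert b.nesting_level == a.nesting_level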
+
+ def _populate(self):
+ """
+ Fill in all the cache information. This method is threadsafe, in the
+ sense that every caller will see the same state upon return, and if the
+ cache is already initialised, it does no work.
+ """
+ if self.loaded:
+ return
+ # Note that we want to use the import lock here - the app loading is
+ # in many cases initiated implicitly by importing, and thus it is
+ # possible to end up in deadlock when one thread initiates loading
+ # without holding the importer lock and another thread then tries to
+ # import something which also launches the app loading. For details of
+ # this situation see #18251.
+ imp.acquire_lock()
+ try:
+ if self.loaded:
+ return
+ for app_name in settings.INSTALLED_APPS:
+ if app_name in self.handled:
+ continue
+ self.load_app(app_name, True)
+ if not self.nesting_level:
+ for app_name in self.postponed:
+ self.load_app(app_name)
+ self.loaded = True
+ finally:
+ imp.release_lock()
+
+ def _label_for(self, app_mod):
+ """
+ Return app_label for given models module.
+
+ """
+ return app_mod.__name__.split('.')[-2]
+
+ def load_app(self, app_name, can_postpone=False):
+ """
+ Loads the app with the provided fully qualified name, and returns the
+ model module.
+ """
+ self.handled.add(app_name)
+ self.nesting_level += 1
+ app_module = import_module(app_name)
+ try:
+ models = import_module('%s.models' % app_name)
+ except ImportError:
+ self.nesting_level -= 1
+ # If the app doesn't have a models module, we can just ignore the
+ # ImportError and return no models for it.
+ if not module_has_submodule(app_module, 'models'):
+ return None
+ # But if the app does have a models module, we need to figure out
+ # whether to suppress or propagate the error. If can_postpone is
+ # True then it may be that the package is still being imported by
+ # Python and the models module isn't available yet. So we add the
+ # app to the postponed list and we'll try it again after all the
+ # recursion has finished (in populate). If can_postpone is False
+ # then it's time to raise the ImportError.
+ else:
+ if can_postpone:
+ self.postponed.append(app_name)
+ return None
+ else:
+ raise
+
+ self.nesting_level -= 1
+ if models not in self.app_store:
+ self.app_store[models] = len(self.app_store)
+ self.app_labels[self._label_for(models)] = models
+ return models
+
+ def app_cache_ready(self):
+ """
+        Returns True if the model cache is fully populated.
+
+        Useful for code that wants to cache the results of get_models() for
+        itself once it is safe to do so.
+ """
+ return self.loaded
+
+ def get_apps(self):
+ """
+ Returns a list of all installed modules that contain models.
+ """
+ self._populate()
+
+ apps = self.app_store.items()
+ if self.available_apps is not None:
+ apps = [elt for elt in apps
+ if self._label_for(elt[0]) in self.available_apps]
+
+ # Ensure the returned list is always in the same order (with new apps
+ # added at the end). This avoids unstable ordering on the admin app
+ # list page, for example.
+ apps = sorted(apps, key=lambda elt: elt[1])
+
+ return [elt[0] for elt in apps]
+
+ def get_app_paths(self):
+ """
+ Returns a list of paths to all installed apps.
+
+ Useful for discovering files at conventional locations inside apps
+ (static files, templates, etc.)
+ """
+ self._populate()
+
+ app_paths = []
+ for app in self.get_apps():
+ if hasattr(app, '__path__'): # models/__init__.py package
+ app_paths.extend([upath(path) for path in app.__path__])
+ else: # models.py module
+ app_paths.append(upath(app.__file__))
+ return app_paths
+
+ def get_app(self, app_label, emptyOK=False):
+ """
+ Returns the module containing the models for the given app_label.
+
+ Returns None if the app has no models in it and emptyOK is True.
+
+        Raises UnavailableApp when set_available_apps() is in effect and
+ doesn't include app_label.
+ """
+ self._populate()
+ imp.acquire_lock()
+ try:
+ for app_name in settings.INSTALLED_APPS:
+ if app_label == app_name.split('.')[-1]:
+ mod = self.load_app(app_name, False)
+ if mod is None and not emptyOK:
+ raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
+ if self.available_apps is not None and app_label not in self.available_apps:
+ raise UnavailableApp("App with label %s isn't available." % app_label)
+ return mod
+ raise ImproperlyConfigured("App with label %s could not be found" % app_label)
+ finally:
+ imp.release_lock()
+
+ def get_app_errors(self):
+ "Returns the map of known problems with the INSTALLED_APPS."
+ self._populate()
+ return self.app_errors
+
+ def get_models(self, app_mod=None,
+ include_auto_created=False, include_deferred=False,
+ only_installed=True, include_swapped=False):
+ """
+        If given a module containing models, returns a list of those models.
+        Otherwise, returns a list of all installed models.
+
+ By default, auto-created models (i.e., m2m models without an
+ explicit intermediate table) are not included. However, if you
+ specify include_auto_created=True, they will be.
+
+ By default, models created to satisfy deferred attribute
+ queries are *not* included in the list of models. However, if
+ you specify include_deferred, they will be.
+
+ By default, models that aren't part of installed apps will *not*
+ be included in the list of models. However, if you specify
+ only_installed=False, they will be.
+
+ By default, models that have been swapped out will *not* be
+ included in the list of models. However, if you specify
+ include_swapped, they will be.
+ """
+ cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
+ model_list = None
+ try:
+ model_list = self._get_models_cache[cache_key]
+ if self.available_apps is not None and only_installed:
+ model_list = [m for m in model_list
+ if m._meta.app_label in self.available_apps]
+ return model_list
+ except KeyError:
+ pass
+ self._populate()
+ if app_mod:
+ if app_mod in self.app_store:
+ app_list = [self.app_models.get(self._label_for(app_mod),
+ SortedDict())]
+ else:
+ app_list = []
+ else:
+ if only_installed:
+ app_list = [self.app_models.get(app_label, SortedDict())
+ for app_label in six.iterkeys(self.app_labels)]
+ else:
+ app_list = six.itervalues(self.app_models)
+ model_list = []
+ for app in app_list:
+ model_list.extend(
+ model for model in app.values()
+ if ((not model._deferred or include_deferred) and
+ (not model._meta.auto_created or include_auto_created) and
+ (not model._meta.swapped or include_swapped))
+ )
+ self._get_models_cache[cache_key] = model_list
+ if self.available_apps is not None and only_installed:
+ model_list = [m for m in model_list
+ if m._meta.app_label in self.available_apps]
+ return model_list
+
+ def get_model(self, app_label, model_name,
+ seed_cache=True, only_installed=True):
+ """
+ Returns the model matching the given app_label and case-insensitive
+ model_name.
+
+ Returns None if no model is found.
+
+        Raises UnavailableApp when set_available_apps() is in effect and
+ doesn't include app_label.
+ """
+ if seed_cache:
+ self._populate()
+ if only_installed and app_label not in self.app_labels:
+ return None
+ if (self.available_apps is not None and only_installed
+ and app_label not in self.available_apps):
+ raise UnavailableApp("App with label %s isn't available." % app_label)
+ try:
+ return self.app_models[app_label][model_name.lower()]
+ except KeyError:
+ return None
+
+ def register_models(self, app_label, *models):
+ """
+ Register a set of models as belonging to an app.
+ """
+ for model in models:
+ # Store as 'name: model' pair in a dictionary
+ # in the app_models dictionary
+ model_name = model._meta.model_name
+ model_dict = self.app_models.setdefault(app_label, SortedDict())
+ if model_name in model_dict:
+ # The same model may be imported via different paths (e.g.
+ # appname.models and project.appname.models). We use the source
+ # filename as a means to detect identity.
+ fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__))
+ fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__))
+ # Since the filename extension could be .py the first time and
+ # .pyc or .pyo the second time, ignore the extension when
+ # comparing.
+ if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
+ continue
+ model_dict[model_name] = model
+ self._get_models_cache.clear()
+
+ def set_available_apps(self, available):
+ if not set(available).issubset(set(settings.INSTALLED_APPS)):
+ extra = set(available) - set(settings.INSTALLED_APPS)
+ raise ValueError("Available apps isn't a subset of installed "
+ "apps, extra apps: " + ", ".join(extra))
+ self.available_apps = set(app.rsplit('.', 1)[-1] for app in available)
+
+ def unset_available_apps(self):
+ self.available_apps = None
+
+cache = AppCache()
+
+# These methods were always module level, so are kept that way for backwards
+# compatibility.
+get_apps = cache.get_apps
+get_app_paths = cache.get_app_paths
+get_app = cache.get_app
+get_app_errors = cache.get_app_errors
+get_models = cache.get_models
+get_model = cache.get_model
+register_models = cache.register_models
+load_app = cache.load_app
+app_cache_ready = cache.app_cache_ready
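+
+# Typical use of the module-level API (a sketch; 'auth' is only an example of
+# an installed app label):
+#
+#     from django.db.models.loading import get_model, get_models
+#     User = get_model('auth', 'user')   # model name is case-insensitive
+#     models = get_models()              # fully populates the cache first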
diff --git a/lib/python2.7/site-packages/django/db/models/manager.py b/lib/python2.7/site-packages/django/db/models/manager.py
new file mode 100644
index 0000000..a1aa79f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/manager.py
@@ -0,0 +1,283 @@
+import copy
+from django.db import router
+from django.db.models.query import QuerySet, insert_query, RawQuerySet
+from django.db.models import signals
+from django.db.models.fields import FieldDoesNotExist
+from django.utils import six
+from django.utils.deprecation import RenameMethodsBase
+
+def ensure_default_manager(sender, **kwargs):
+ """
+    Ensures that a Model subclass contains a default manager and sets the
+    _default_manager attribute on the class. Also sets up _base_manager to
+    point to a plain Manager instance (which may be the same object as
+    _default_manager).
+ """
+ cls = sender
+ if cls._meta.abstract:
+ setattr(cls, 'objects', AbstractManagerDescriptor(cls))
+ return
+ elif cls._meta.swapped:
+ setattr(cls, 'objects', SwappedManagerDescriptor(cls))
+ return
+ if not getattr(cls, '_default_manager', None):
+ # Create the default manager, if needed.
+ try:
+ cls._meta.get_field('objects')
+ raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
+ except FieldDoesNotExist:
+ pass
+ cls.add_to_class('objects', Manager())
+ cls._base_manager = cls.objects
+ elif not getattr(cls, '_base_manager', None):
+ default_mgr = cls._default_manager.__class__
+ if (default_mgr is Manager or
+ getattr(default_mgr, "use_for_related_fields", False)):
+ cls._base_manager = cls._default_manager
+ else:
+ # Default manager isn't a plain Manager class, or a suitable
+ # replacement, so we walk up the base class hierarchy until we hit
+ # something appropriate.
+ for base_class in default_mgr.mro()[1:]:
+ if (base_class is Manager or
+ getattr(base_class, "use_for_related_fields", False)):
+ cls.add_to_class('_base_manager', base_class())
+ return
+ raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
+
+signals.class_prepared.connect(ensure_default_manager)
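+
+# Effect of the hook above, as a sketch (Article is a hypothetical model): a
+# concrete model that declares no manager of its own ends up with a plain
+# Manager attached automatically:
+#
+#     class Article(models.Model):
+#         title = models.CharField(max_length=100)
+#
+#     Article.objects                                      # plain Manager
+#     assert Article._base_manager is Article._default_manager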
+
+
+class RenameManagerMethods(RenameMethodsBase):
+ renamed_methods = (
+ ('get_query_set', 'get_queryset', PendingDeprecationWarning),
+ ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
+ )
+
+
+class Manager(six.with_metaclass(RenameManagerMethods)):
+ # Tracks each time a Manager instance is created. Used to retain order.
+ creation_counter = 0
+
+ def __init__(self):
+ super(Manager, self).__init__()
+ self._set_creation_counter()
+ self.model = None
+ self._inherited = False
+ self._db = None
+
+ def contribute_to_class(self, model, name):
+ # TODO: Use weakref because of possible memory leak / circular reference.
+ self.model = model
+ # Only contribute the manager if the model is concrete
+ if model._meta.abstract:
+ setattr(model, name, AbstractManagerDescriptor(model))
+ elif model._meta.swapped:
+ setattr(model, name, SwappedManagerDescriptor(model))
+ else:
+ setattr(model, name, ManagerDescriptor(self))
+ if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
+ model._default_manager = self
+ if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
+ model._meta.abstract_managers.append((self.creation_counter, name,
+ self))
+ else:
+ model._meta.concrete_managers.append((self.creation_counter, name,
+ self))
+
+ def _set_creation_counter(self):
+ """
+ Sets the creation counter value for this instance and increments the
+ class-level copy.
+ """
+ self.creation_counter = Manager.creation_counter
+ Manager.creation_counter += 1
+
+ def _copy_to_model(self, model):
+ """
+ Makes a copy of the manager and assigns it to 'model', which should be
+ a child of the existing model (used when inheriting a manager from an
+ abstract base class).
+ """
+ assert issubclass(model, self.model)
+ mgr = copy.copy(self)
+ mgr._set_creation_counter()
+ mgr.model = model
+ mgr._inherited = True
+ return mgr
+
+ def db_manager(self, using):
+ obj = copy.copy(self)
+ obj._db = using
+ return obj
+
+ @property
+ def db(self):
+ return self._db or router.db_for_read(self.model)
+
+ #######################
+ # PROXIES TO QUERYSET #
+ #######################
+
+ def get_queryset(self):
+ """Returns a new QuerySet object. Subclasses can override this method
+ to easily customize the behavior of the Manager.
+ """
+ return QuerySet(self.model, using=self._db)
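+
+    # Subclassing sketch (PublishedManager and the 'published' field are
+    # illustrative): overriding get_queryset() changes what every proxy
+    # method below returns:
+    #
+    #     class PublishedManager(Manager):
+    #         def get_queryset(self):
+    #             qs = super(PublishedManager, self).get_queryset()
+    #             return qs.filter(published=True)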
+
+ def none(self):
+ return self.get_queryset().none()
+
+ def all(self):
+ return self.get_queryset()
+
+ def count(self):
+ return self.get_queryset().count()
+
+ def dates(self, *args, **kwargs):
+ return self.get_queryset().dates(*args, **kwargs)
+
+ def datetimes(self, *args, **kwargs):
+ return self.get_queryset().datetimes(*args, **kwargs)
+
+ def distinct(self, *args, **kwargs):
+ return self.get_queryset().distinct(*args, **kwargs)
+
+ def extra(self, *args, **kwargs):
+ return self.get_queryset().extra(*args, **kwargs)
+
+ def get(self, *args, **kwargs):
+ return self.get_queryset().get(*args, **kwargs)
+
+ def get_or_create(self, **kwargs):
+ return self.get_queryset().get_or_create(**kwargs)
+
+ def create(self, **kwargs):
+ return self.get_queryset().create(**kwargs)
+
+ def bulk_create(self, *args, **kwargs):
+ return self.get_queryset().bulk_create(*args, **kwargs)
+
+ def filter(self, *args, **kwargs):
+ return self.get_queryset().filter(*args, **kwargs)
+
+ def aggregate(self, *args, **kwargs):
+ return self.get_queryset().aggregate(*args, **kwargs)
+
+ def annotate(self, *args, **kwargs):
+ return self.get_queryset().annotate(*args, **kwargs)
+
+ def complex_filter(self, *args, **kwargs):
+ return self.get_queryset().complex_filter(*args, **kwargs)
+
+ def exclude(self, *args, **kwargs):
+ return self.get_queryset().exclude(*args, **kwargs)
+
+ def in_bulk(self, *args, **kwargs):
+ return self.get_queryset().in_bulk(*args, **kwargs)
+
+ def iterator(self, *args, **kwargs):
+ return self.get_queryset().iterator(*args, **kwargs)
+
+ def earliest(self, *args, **kwargs):
+ return self.get_queryset().earliest(*args, **kwargs)
+
+ def latest(self, *args, **kwargs):
+ return self.get_queryset().latest(*args, **kwargs)
+
+ def first(self):
+ return self.get_queryset().first()
+
+ def last(self):
+ return self.get_queryset().last()
+
+ def order_by(self, *args, **kwargs):
+ return self.get_queryset().order_by(*args, **kwargs)
+
+ def select_for_update(self, *args, **kwargs):
+ return self.get_queryset().select_for_update(*args, **kwargs)
+
+ def select_related(self, *args, **kwargs):
+ return self.get_queryset().select_related(*args, **kwargs)
+
+ def prefetch_related(self, *args, **kwargs):
+ return self.get_queryset().prefetch_related(*args, **kwargs)
+
+ def values(self, *args, **kwargs):
+ return self.get_queryset().values(*args, **kwargs)
+
+ def values_list(self, *args, **kwargs):
+ return self.get_queryset().values_list(*args, **kwargs)
+
+ def update(self, *args, **kwargs):
+ return self.get_queryset().update(*args, **kwargs)
+
+ def reverse(self, *args, **kwargs):
+ return self.get_queryset().reverse(*args, **kwargs)
+
+ def defer(self, *args, **kwargs):
+ return self.get_queryset().defer(*args, **kwargs)
+
+ def only(self, *args, **kwargs):
+ return self.get_queryset().only(*args, **kwargs)
+
+ def using(self, *args, **kwargs):
+ return self.get_queryset().using(*args, **kwargs)
+
+ def exists(self, *args, **kwargs):
+ return self.get_queryset().exists(*args, **kwargs)
+
+ def _insert(self, objs, fields, **kwargs):
+ return insert_query(self.model, objs, fields, **kwargs)
+
+ def _update(self, values, **kwargs):
+ return self.get_queryset()._update(values, **kwargs)
+
+ def raw(self, raw_query, params=None, *args, **kwargs):
+ return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)
+
+
+class ManagerDescriptor(object):
+ # This class ensures managers aren't accessible via model instances.
+ # For example, Poll.objects works, but poll_obj.objects raises AttributeError.
+ def __init__(self, manager):
+ self.manager = manager
+
+ def __get__(self, instance, type=None):
+        if instance is not None:
+ raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
+ return self.manager
+
+
+class AbstractManagerDescriptor(object):
+ # This class provides a better error message when you try to access a
+ # manager on an abstract model.
+ def __init__(self, model):
+ self.model = model
+
+ def __get__(self, instance, type=None):
+ raise AttributeError("Manager isn't available; %s is abstract" % (
+ self.model._meta.object_name,
+ ))
+
+
+class SwappedManagerDescriptor(object):
+ # This class provides a better error message when you try to access a
+ # manager on a swapped model.
+ def __init__(self, model):
+ self.model = model
+
+ def __get__(self, instance, type=None):
+ raise AttributeError("Manager isn't available; %s has been swapped for '%s'" % (
+ self.model._meta.object_name, self.model._meta.swapped
+ ))
+
+
+class EmptyManager(Manager):
+ def __init__(self, model):
+ super(EmptyManager, self).__init__()
+ self.model = model
+
+ def get_queryset(self):
+ return super(EmptyManager, self).get_queryset().none()
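+
+# Routing sketch ('replica' is an assumed alias in settings.DATABASES, and
+# Article a hypothetical model):
+#
+#     Article.objects.db_manager('replica').get(pk=1)          # whole manager
+#     Article.objects.using('replica').filter(published=True)  # one queryset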
diff --git a/lib/python2.7/site-packages/django/db/models/options.py b/lib/python2.7/site-packages/django/db/models/options.py
new file mode 100644
index 0000000..6ccc67d
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/options.py
@@ -0,0 +1,589 @@
+from __future__ import unicode_literals
+
+import re
+from bisect import bisect
+import warnings
+
+from django.conf import settings
+from django.db.models.fields.related import ManyToManyRel
+from django.db.models.fields import AutoField, FieldDoesNotExist
+from django.db.models.fields.proxy import OrderWrt
+from django.db.models.loading import get_models, app_cache_ready
+from django.utils import six
+from django.utils.functional import cached_property
+from django.utils.datastructures import SortedDict
+from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
+from django.utils.translation import activate, deactivate_all, get_language, string_concat
+
+# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
+get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
+
+DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
+ 'unique_together', 'permissions', 'get_latest_by',
+ 'order_with_respect_to', 'app_label', 'db_tablespace',
+ 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
+ 'index_together', 'select_on_save')
+
+
+@python_2_unicode_compatible
+class Options(object):
+ def __init__(self, meta, app_label=None):
+ self.local_fields, self.local_many_to_many = [], []
+ self.virtual_fields = []
+ self.model_name, self.verbose_name = None, None
+ self.verbose_name_plural = None
+ self.db_table = ''
+ self.ordering = []
+ self.unique_together = []
+ self.index_together = []
+ self.select_on_save = False
+ self.permissions = []
+ self.object_name, self.app_label = None, app_label
+ self.get_latest_by = None
+ self.order_with_respect_to = None
+ self.db_tablespace = settings.DEFAULT_TABLESPACE
+ self.meta = meta
+ self.pk = None
+ self.has_auto_field, self.auto_field = False, None
+ self.abstract = False
+ self.managed = True
+ self.proxy = False
+ # For any class that is a proxy (including automatically created
+ # classes for deferred object loading), proxy_for_model tells us
+ # which class this model is proxying. Note that proxy_for_model
+ # can create a chain of proxy models. For non-proxy models, the
+ # variable is always None.
+ self.proxy_for_model = None
+ # For any non-abstract class, the concrete class is the model
+ # in the end of the proxy_for_model chain. In particular, for
+ # concrete models, the concrete_model is always the class itself.
+ self.concrete_model = None
+ self.swappable = None
+ self.parents = SortedDict()
+ self.auto_created = False
+
+ # To handle various inheritance situations, we need to track where
+ # managers came from (concrete or abstract base classes).
+ self.abstract_managers = []
+ self.concrete_managers = []
+
+ # List of all lookups defined in ForeignKey 'limit_choices_to' options
+ # from *other* models. Needed for some admin checks. Internal use only.
+ self.related_fkey_lookups = []
+
+ def contribute_to_class(self, cls, name):
+ from django.db import connection
+ from django.db.backends.util import truncate_name
+
+ cls._meta = self
+ self.model = cls
+        self.installed = re.sub(r'\.models$', '', cls.__module__) in settings.INSTALLED_APPS
+ # First, construct the default values for these options.
+ self.object_name = cls.__name__
+ self.model_name = self.object_name.lower()
+ self.verbose_name = get_verbose_name(self.object_name)
+
+ # Next, apply any overridden values from 'class Meta'.
+ if self.meta:
+ meta_attrs = self.meta.__dict__.copy()
+ for name in self.meta.__dict__:
+ # Ignore any private attributes that Django doesn't care about.
+ # NOTE: We can't modify a dictionary's contents while looping
+ # over it, so we loop over the *original* dictionary instead.
+ if name.startswith('_'):
+ del meta_attrs[name]
+ for attr_name in DEFAULT_NAMES:
+ if attr_name in meta_attrs:
+ setattr(self, attr_name, meta_attrs.pop(attr_name))
+ elif hasattr(self.meta, attr_name):
+ setattr(self, attr_name, getattr(self.meta, attr_name))
+
+ # unique_together can be either a tuple of tuples, or a single
+ # tuple of two strings. Normalize it to a tuple of tuples, so that
+ # calling code can uniformly expect that.
+ ut = meta_attrs.pop('unique_together', self.unique_together)
+ if ut and not isinstance(ut[0], (tuple, list)):
+ ut = (ut,)
+ self.unique_together = ut
+
+ # verbose_name_plural is a special case because it uses a 's'
+ # by default.
+ if self.verbose_name_plural is None:
+ self.verbose_name_plural = string_concat(self.verbose_name, 's')
+
+ # Any leftover attributes must be invalid.
+ if meta_attrs != {}:
+ raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
+ else:
+ self.verbose_name_plural = string_concat(self.verbose_name, 's')
+ del self.meta
+
+ # If the db_table wasn't provided, use the app_label + model_name.
+ if not self.db_table:
+ self.db_table = "%s_%s" % (self.app_label, self.model_name)
+ self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
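+
+        # For example (illustrative): a BookReview model in an app labelled
+        # 'library' with no explicit Meta.db_table gets the default table
+        # name 'library_bookreview', truncated to the backend's name limit.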
+
+ @property
+ def module_name(self):
+ """
+ This property has been deprecated in favor of `model_name`. refs #19689
+ """
+ warnings.warn(
+ "Options.module_name has been deprecated in favor of model_name",
+ PendingDeprecationWarning, stacklevel=2)
+ return self.model_name
+
+ def _prepare(self, model):
+ if self.order_with_respect_to:
+ self.order_with_respect_to = self.get_field(self.order_with_respect_to)
+ self.ordering = ('_order',)
+ model.add_to_class('_order', OrderWrt())
+ else:
+ self.order_with_respect_to = None
+
+ if self.pk is None:
+ if self.parents:
+ # Promote the first parent link in lieu of adding yet another
+ # field.
+ field = next(six.itervalues(self.parents))
+ # Look for a local field with the same name as the
+ # first parent link. If a local field has already been
+ # created, use it instead of promoting the parent
+ already_created = [fld for fld in self.local_fields if fld.name == field.name]
+ if already_created:
+ field = already_created[0]
+ field.primary_key = True
+ self.setup_pk(field)
+ else:
+ auto = AutoField(verbose_name='ID', primary_key=True,
+ auto_created=True)
+ model.add_to_class('id', auto)
+
+ def add_field(self, field):
+ # Insert the given field in the order in which it was created, using
+ # the "creation_counter" attribute of the field.
+ # Move many-to-many related fields from self.fields into
+ # self.many_to_many.
+ if field.rel and isinstance(field.rel, ManyToManyRel):
+ self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
+ if hasattr(self, '_m2m_cache'):
+ del self._m2m_cache
+ else:
+ self.local_fields.insert(bisect(self.local_fields, field), field)
+ self.setup_pk(field)
+ if hasattr(self, '_field_cache'):
+ del self._field_cache
+ del self._field_name_cache
+ # The fields, concrete_fields and local_concrete_fields are
+ # implemented as cached properties for performance reasons.
+        # The attrs will not exist if the cached property isn't
+ # accessed yet, hence the try-excepts.
+ try:
+ del self.fields
+ except AttributeError:
+ pass
+ try:
+ del self.concrete_fields
+ except AttributeError:
+ pass
+ try:
+ del self.local_concrete_fields
+ except AttributeError:
+ pass
+
+ if hasattr(self, '_name_map'):
+ del self._name_map
+
+ def add_virtual_field(self, field):
+ self.virtual_fields.append(field)
+
+ def setup_pk(self, field):
+ if not self.pk and field.primary_key:
+ self.pk = field
+ field.serialize = False
+
+ def pk_index(self):
+ """
+ Returns the index of the primary key field in the self.concrete_fields
+ list.
+ """
+ return self.concrete_fields.index(self.pk)
+
+ def setup_proxy(self, target):
+ """
+ Does the internal setup so that the current model is a proxy for
+ "target".
+ """
+ self.pk = target._meta.pk
+ self.proxy_for_model = target
+ self.db_table = target._meta.db_table
+
+ def __repr__(self):
+ return '<Options for %s>' % self.object_name
+
+ def __str__(self):
+ return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
+
+ def verbose_name_raw(self):
+ """
+ There are a few places where the untranslated verbose name is needed
+ (so that we get the same value regardless of currently active
+ locale).
+ """
+ lang = get_language()
+ deactivate_all()
+ raw = force_text(self.verbose_name)
+ activate(lang)
+ return raw
+ verbose_name_raw = property(verbose_name_raw)
+
+ def _swapped(self):
+ """
+ Has this model been swapped out for another? If so, return the model
+ name of the replacement; otherwise, return None.
+
+ For historical reasons, model name lookups using get_model() are
+ case insensitive, so we make sure we are case insensitive here.
+ """
+ if self.swappable:
+ model_label = '%s.%s' % (self.app_label, self.model_name)
+ swapped_for = getattr(settings, self.swappable, None)
+ if swapped_for:
+ try:
+ swapped_label, swapped_object = swapped_for.split('.')
+ except ValueError:
+ # setting not in the format app_label.model_name
+ # raising ImproperlyConfigured here causes problems with
+ # test cleanup code - instead it is raised in get_user_model
+ # or as part of validation.
+ return swapped_for
+
+ if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
+ return swapped_for
+ return None
+ swapped = property(_swapped)
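+
+    # Swapping sketch (the settings value is an assumption for illustration):
+    # with swappable = 'AUTH_USER_MODEL' on auth.User and
+    # AUTH_USER_MODEL = 'myapp.CustomUser' in settings, User._meta.swapped
+    # returns 'myapp.CustomUser' while CustomUser._meta.swapped is None.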
+
+ @cached_property
+ def fields(self):
+ """
+ The getter for self.fields. This returns the list of field objects
+ available to this model (including through parent models).
+
+ Callers are not permitted to modify this list, since it's a reference
+ to this instance (not a copy).
+ """
+ try:
+ self._field_name_cache
+ except AttributeError:
+ self._fill_fields_cache()
+ return self._field_name_cache
+
+ @cached_property
+ def concrete_fields(self):
+ return [f for f in self.fields if f.column is not None]
+
+ @cached_property
+ def local_concrete_fields(self):
+ return [f for f in self.local_fields if f.column is not None]
+
+ def get_fields_with_model(self):
+ """
+ Returns a sequence of (field, model) pairs for all fields. The "model"
+ element is None for fields on the current model. Mostly of use when
+ constructing queries so that we know which model a field belongs to.
+ """
+ try:
+ self._field_cache
+ except AttributeError:
+ self._fill_fields_cache()
+ return self._field_cache
+
+ def get_concrete_fields_with_model(self):
+ return [(field, model) for field, model in self.get_fields_with_model() if
+ field.column is not None]
+
+ def _fill_fields_cache(self):
+ cache = []
+ for parent in self.parents:
+ for field, model in parent._meta.get_fields_with_model():
+ if model:
+ cache.append((field, model))
+ else:
+ cache.append((field, parent))
+ cache.extend([(f, None) for f in self.local_fields])
+ self._field_cache = tuple(cache)
+ self._field_name_cache = [x for x, _ in cache]
+
+ def _many_to_many(self):
+ try:
+ self._m2m_cache
+ except AttributeError:
+ self._fill_m2m_cache()
+ return list(self._m2m_cache)
+ many_to_many = property(_many_to_many)
+
+ def get_m2m_with_model(self):
+ """
+ The many-to-many version of get_fields_with_model().
+ """
+ try:
+ self._m2m_cache
+ except AttributeError:
+ self._fill_m2m_cache()
+ return list(six.iteritems(self._m2m_cache))
+
+ def _fill_m2m_cache(self):
+ cache = SortedDict()
+ for parent in self.parents:
+ for field, model in parent._meta.get_m2m_with_model():
+ if model:
+ cache[field] = model
+ else:
+ cache[field] = parent
+ for field in self.local_many_to_many:
+ cache[field] = None
+ self._m2m_cache = cache
+
+ def get_field(self, name, many_to_many=True):
+ """
+ Returns the requested field by name. Raises FieldDoesNotExist on error.
+ """
+ to_search = (self.fields + self.many_to_many) if many_to_many else self.fields
+ for f in to_search:
+ if f.name == name:
+ return f
+ raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
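+
+    # Usage sketch (Article and 'title' are illustrative):
+    #
+    #     Article._meta.get_field('title')     # -> the CharField instance
+    #     Article._meta.get_field('nope')      # raises FieldDoesNotExist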
+
+ def get_field_by_name(self, name):
+ """
+ Returns the (field_object, model, direct, m2m), where field_object is
+ the Field instance for the given name, model is the model containing
+ this field (None for local fields), direct is True if the field exists
+ on this model, and m2m is True for many-to-many relations. When
+ 'direct' is False, 'field_object' is the corresponding RelatedObject
+ for this field (since the field doesn't have an instance associated
+ with it).
+
+ Uses a cache internally, so after the first access, this is very fast.
+ """
+ try:
+ try:
+ return self._name_map[name]
+ except AttributeError:
+ cache = self.init_name_map()
+ return cache[name]
+ except KeyError:
+ raise FieldDoesNotExist('%s has no field named %r'
+ % (self.object_name, name))
+
+ def get_all_field_names(self):
+ """
+ Returns a list of all field names that are possible for this model
+ (including reverse relation names). This is used for pretty printing
+ debugging output (a list of choices), so any internal-only field names
+ are not included.
+ """
+ try:
+ cache = self._name_map
+ except AttributeError:
+ cache = self.init_name_map()
+ names = sorted(cache.keys())
+ # Internal-only names end with "+" (symmetrical m2m related names being
+ # the main example). Trim them.
+ return [val for val in names if not val.endswith('+')]
+
+ def init_name_map(self):
+ """
+ Initialises the field name -> field object mapping.
+ """
+ cache = {}
+ # We intentionally handle related m2m objects first so that symmetrical
+ # m2m accessor names can be overridden, if necessary.
+ for f, model in self.get_all_related_m2m_objects_with_model():
+ cache[f.field.related_query_name()] = (f, model, False, True)
+ for f, model in self.get_all_related_objects_with_model():
+ cache[f.field.related_query_name()] = (f, model, False, False)
+ for f, model in self.get_m2m_with_model():
+ cache[f.name] = (f, model, True, True)
+ for f, model in self.get_fields_with_model():
+ cache[f.name] = (f, model, True, False)
+ for f in self.virtual_fields:
+ if hasattr(f, 'related'):
+ cache[f.name] = (f.related, None if f.model == self.model else f.model, True, False)
+ if app_cache_ready():
+ self._name_map = cache
+ return cache
+
+ def get_add_permission(self):
+ """
+ This method has been deprecated in favor of
+ `django.contrib.auth.get_permission_codename`. refs #20642
+ """
+ warnings.warn(
+ "`Options.get_add_permission` has been deprecated in favor "
+ "of `django.contrib.auth.get_permission_codename`.",
+ PendingDeprecationWarning, stacklevel=2)
+ return 'add_%s' % self.model_name
+
+ def get_change_permission(self):
+ """
+ This method has been deprecated in favor of
+ `django.contrib.auth.get_permission_codename`. refs #20642
+ """
+ warnings.warn(
+ "`Options.get_change_permission` has been deprecated in favor "
+ "of `django.contrib.auth.get_permission_codename`.",
+ PendingDeprecationWarning, stacklevel=2)
+ return 'change_%s' % self.model_name
+
+ def get_delete_permission(self):
+ """
+ This method has been deprecated in favor of
+ `django.contrib.auth.get_permission_codename`. refs #20642
+ """
+ warnings.warn(
+ "`Options.get_delete_permission` has been deprecated in favor "
+ "of `django.contrib.auth.get_permission_codename`.",
+ PendingDeprecationWarning, stacklevel=2)
+ return 'delete_%s' % self.model_name
+
+ def get_all_related_objects(self, local_only=False, include_hidden=False,
+ include_proxy_eq=False):
+ return [k for k, v in self.get_all_related_objects_with_model(
+ local_only=local_only, include_hidden=include_hidden,
+ include_proxy_eq=include_proxy_eq)]
+
+ def get_all_related_objects_with_model(self, local_only=False,
+ include_hidden=False,
+ include_proxy_eq=False):
+ """
+ Returns a list of (related-object, model) pairs. Similar to
+ get_fields_with_model().
+ """
+ try:
+ self._related_objects_cache
+ except AttributeError:
+ self._fill_related_objects_cache()
+ predicates = []
+ if local_only:
+ predicates.append(lambda k, v: not v)
+ if not include_hidden:
+ predicates.append(lambda k, v: not k.field.rel.is_hidden())
+ cache = (self._related_objects_proxy_cache if include_proxy_eq
+ else self._related_objects_cache)
+ return [t for t in cache.items() if all(p(*t) for p in predicates)]
+
+ def _fill_related_objects_cache(self):
+ cache = SortedDict()
+ parent_list = self.get_parent_list()
+ for parent in self.parents:
+ for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
+ if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
+ continue
+ if not model:
+ cache[obj] = parent
+ else:
+ cache[obj] = model
+ # Collect also objects which are in relation to some proxy child/parent of self.
+ proxy_cache = cache.copy()
+ for klass in get_models(include_auto_created=True, only_installed=False):
+ if not klass._meta.swapped:
+ for f in klass._meta.local_fields:
+ if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation:
+ if self == f.rel.to._meta:
+ cache[f.related] = None
+ proxy_cache[f.related] = None
+ elif self.concrete_model == f.rel.to._meta.concrete_model:
+ proxy_cache[f.related] = None
+ self._related_objects_cache = cache
+ self._related_objects_proxy_cache = proxy_cache
+
+ def get_all_related_many_to_many_objects(self, local_only=False):
+ try:
+ cache = self._related_many_to_many_cache
+ except AttributeError:
+ cache = self._fill_related_many_to_many_cache()
+ if local_only:
+ return [k for k, v in cache.items() if not v]
+ return list(cache)
+
+ def get_all_related_m2m_objects_with_model(self):
+ """
+ Returns a list of (related-m2m-object, model) pairs. Similar to
+ get_fields_with_model().
+ """
+ try:
+ cache = self._related_many_to_many_cache
+ except AttributeError:
+ cache = self._fill_related_many_to_many_cache()
+ return list(six.iteritems(cache))
+
+ def _fill_related_many_to_many_cache(self):
+ cache = SortedDict()
+ parent_list = self.get_parent_list()
+ for parent in self.parents:
+ for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
+ if obj.field.creation_counter < 0 and obj.model not in parent_list:
+ continue
+ if not model:
+ cache[obj] = parent
+ else:
+ cache[obj] = model
+ for klass in get_models(only_installed=False):
+ if not klass._meta.swapped:
+ for f in klass._meta.local_many_to_many:
+ if (f.rel
+ and not isinstance(f.rel.to, six.string_types)
+ and self == f.rel.to._meta):
+ cache[f.related] = None
+ if app_cache_ready():
+ self._related_many_to_many_cache = cache
+ return cache
+
+ def get_base_chain(self, model):
+ """
+        Returns a list of parent classes leading to 'model' (ordered from
+        closest to most distant ancestor). This has to handle the case where
+        'model' is a grandparent or even more distant relation.
+ """
+ if not self.parents:
+ return None
+ if model in self.parents:
+ return [model]
+ for parent in self.parents:
+ res = parent._meta.get_base_chain(model)
+ if res:
+ res.insert(0, parent)
+ return res
+ return None
+
+ def get_parent_list(self):
+ """
+        Returns a set of all the ancestors of this model. Useful for
+        determining if something is an ancestor, regardless of lineage.
+ """
+ result = set()
+ for parent in self.parents:
+ result.add(parent)
+ result.update(parent._meta.get_parent_list())
+ return result
+
+ def get_ancestor_link(self, ancestor):
+ """
+ Returns the field on the current model which points to the given
+ "ancestor". This is possible an indirect link (a pointer to a parent
+ model, which points, eventually, to the ancestor). Used when
+ constructing table joins for model inheritance.
+
+ Returns None if the model isn't an ancestor of this one.
+ """
+ if ancestor in self.parents:
+ return self.parents[ancestor]
+ for parent in self.parents:
+ # Tries to get a link field from the immediate parent
+ parent_link = parent._meta.get_ancestor_link(ancestor)
+ if parent_link:
+                # In the case of a proxied model, the first link
+                # in the chain to the ancestor is the link to that
+                # parent.
+ return self.parents[parent] or parent_link
diff --git a/lib/python2.7/site-packages/django/db/models/query.py b/lib/python2.7/site-packages/django/db/models/query.py
new file mode 100644
index 0000000..44047d4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/query.py
@@ -0,0 +1,1731 @@
+"""
+The main QuerySet implementation. This provides the public API for the ORM.
+"""
+
+import copy
+import itertools
+import sys
+import warnings
+
+from django.conf import settings
+from django.core import exceptions
+from django.db import connections, router, transaction, DatabaseError
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.fields import AutoField
+from django.db.models.query_utils import (Q, select_related_descend,
+ deferred_class_factory, InvalidQuery)
+from django.db.models.deletion import Collector
+from django.db.models import sql
+from django.utils.functional import partition
+from django.utils import six
+from django.utils import timezone
+
+# The maximum number of items to display in a QuerySet.__repr__
+REPR_OUTPUT_SIZE = 20
+
+# Pull into this namespace for backwards compatibility.
+EmptyResultSet = sql.EmptyResultSet
+
+
+class QuerySet(object):
+ """
+ Represents a lazy database lookup for a set of objects.
+ """
+ def __init__(self, model=None, query=None, using=None):
+ self.model = model
+ self._db = using
+ self.query = query or sql.Query(self.model)
+ self._result_cache = None
+ self._sticky_filter = False
+ self._for_write = False
+ self._prefetch_related_lookups = []
+ self._prefetch_done = False
+        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
+
+ ########################
+ # PYTHON MAGIC METHODS #
+ ########################
+
+ def __deepcopy__(self, memo):
+ """
+ Deep copy of a QuerySet doesn't populate the cache
+ """
+ obj = self.__class__()
+ for k, v in self.__dict__.items():
+ if k == '_result_cache':
+ obj.__dict__[k] = None
+ else:
+ obj.__dict__[k] = copy.deepcopy(v, memo)
+ return obj
+
+ def __getstate__(self):
+ """
+ Allows the QuerySet to be pickled.
+ """
+ # Force the cache to be fully populated.
+ self._fetch_all()
+ obj_dict = self.__dict__.copy()
+ return obj_dict
+
+ def __repr__(self):
+ data = list(self[:REPR_OUTPUT_SIZE + 1])
+ if len(data) > REPR_OUTPUT_SIZE:
+ data[-1] = "...(remaining elements truncated)..."
+ return repr(data)
+
+ def __len__(self):
+ self._fetch_all()
+ return len(self._result_cache)
+
+ def __iter__(self):
+ """
+ The queryset iterator protocol uses three nested iterators in the
+ default case:
+          1. sql/compiler.execute_sql()
+             - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
+               using cursor.fetchmany(). This part is responsible for
+               doing some column masking, and returning the rows in chunks.
+          2. sql/compiler.results_iter()
+             - Returns one row at a time. At this point the rows are still just
+ tuples. In some cases the return values are converted to
+ Python values at this location (see resolve_columns(),
+ resolve_aggregate()).
+ 3. self.iterator()
+ - Responsible for turning the rows into model objects.
+ """
+ self._fetch_all()
+ return iter(self._result_cache)
+
+ def __nonzero__(self):
+ self._fetch_all()
+ return bool(self._result_cache)
+
+ def __getitem__(self, k):
+ """
+ Retrieves an item or slice from the set of results.
+ """
+ if not isinstance(k, (slice,) + six.integer_types):
+ raise TypeError
+ assert ((not isinstance(k, slice) and (k >= 0))
+ or (isinstance(k, slice) and (k.start is None or k.start >= 0)
+ and (k.stop is None or k.stop >= 0))), \
+ "Negative indexing is not supported."
+
+ if self._result_cache is not None:
+ return self._result_cache[k]
+
+ if isinstance(k, slice):
+ qs = self._clone()
+ if k.start is not None:
+ start = int(k.start)
+ else:
+ start = None
+ if k.stop is not None:
+ stop = int(k.stop)
+ else:
+ stop = None
+ qs.query.set_limits(start, stop)
+ return list(qs)[::k.step] if k.step else qs
+
+ qs = self._clone()
+ qs.query.set_limits(k, k + 1)
+ return list(qs)[0]
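+
+    # Slicing sketch (Entry is an illustrative model): slices clone the
+    # queryset and set SQL LIMIT/OFFSET rather than fetching everything:
+    #
+    #     Entry.objects.all()[:5]      # LIMIT 5, still lazy
+    #     Entry.objects.all()[5:10]    # OFFSET 5 LIMIT 5, still lazy
+    #     Entry.objects.all()[0]       # LIMIT 1, evaluated immediately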
+
+ def __and__(self, other):
+ self._merge_sanity_check(other)
+ if isinstance(other, EmptyQuerySet):
+ return other
+ if isinstance(self, EmptyQuerySet):
+ return self
+ combined = self._clone()
+ combined._merge_known_related_objects(other)
+ combined.query.combine(other.query, sql.AND)
+ return combined
+
+ def __or__(self, other):
+ self._merge_sanity_check(other)
+ if isinstance(self, EmptyQuerySet):
+ return other
+ if isinstance(other, EmptyQuerySet):
+ return self
+ combined = self._clone()
+ combined._merge_known_related_objects(other)
+ combined.query.combine(other.query, sql.OR)
+ return combined
+
+ ####################################
+ # METHODS THAT DO DATABASE QUERIES #
+ ####################################
+
+ def iterator(self):
+ """
+ An iterator over the results from applying this QuerySet to the
+ database.
+ """
+ fill_cache = False
+ if connections[self.db].features.supports_select_related:
+ fill_cache = self.query.select_related
+ if isinstance(fill_cache, dict):
+ requested = fill_cache
+ else:
+ requested = None
+ max_depth = self.query.max_depth
+
+ extra_select = list(self.query.extra_select)
+ aggregate_select = list(self.query.aggregate_select)
+
+ only_load = self.query.get_loaded_field_names()
+ if not fill_cache:
+ fields = self.model._meta.concrete_fields
+
+ load_fields = []
+ # If only/defer clauses have been specified,
+ # build the list of fields that are to be loaded.
+ if only_load:
+ for field, model in self.model._meta.get_concrete_fields_with_model():
+ if model is None:
+ model = self.model
+ try:
+ if field.name in only_load[model]:
+ # Add a field that has been explicitly included
+ load_fields.append(field.name)
+ except KeyError:
+ # Model wasn't explicitly listed in the only_load table
+ # Therefore, we need to load all fields from this model
+ load_fields.append(field.name)
+
+ index_start = len(extra_select)
+ aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
+
+ skip = None
+ if load_fields and not fill_cache:
+ # Some fields have been deferred, so we have to initialise
+ # via keyword arguments.
+ skip = set()
+ init_list = []
+ for field in fields:
+ if field.name not in load_fields:
+ skip.add(field.attname)
+ else:
+ init_list.append(field.attname)
+ model_cls = deferred_class_factory(self.model, skip)
+
+ # Cache db and model outside the loop
+ db = self.db
+ model = self.model
+ compiler = self.query.get_compiler(using=db)
+ if fill_cache:
+ klass_info = get_klass_info(model, max_depth=max_depth,
+ requested=requested, only_load=only_load)
+ for row in compiler.results_iter():
+ if fill_cache:
+ obj, _ = get_cached_row(row, index_start, db, klass_info,
+ offset=len(aggregate_select))
+ else:
+ # Omit aggregates in object creation.
+ row_data = row[index_start:aggregate_start]
+ if skip:
+ obj = model_cls(**dict(zip(init_list, row_data)))
+ else:
+ obj = model(*row_data)
+
+ # Store the source database of the object
+ obj._state.db = db
+ # This object came from the database; it's not being added.
+ obj._state.adding = False
+
+ if extra_select:
+ for i, k in enumerate(extra_select):
+ setattr(obj, k, row[i])
+
+ # Add the aggregates to the model
+ if aggregate_select:
+ for i, aggregate in enumerate(aggregate_select):
+ setattr(obj, aggregate, row[i + aggregate_start])
+
+ # Add the known related objects to the model, if there are any
+ if self._known_related_objects:
+ for field, rel_objs in self._known_related_objects.items():
+ pk = getattr(obj, field.get_attname())
+ try:
+ rel_obj = rel_objs[pk]
+ except KeyError:
+ pass # may happen in qs1 | qs2 scenarios
+ else:
+ setattr(obj, field.name, rel_obj)
+
+ yield obj
+
+ def aggregate(self, *args, **kwargs):
+ """
+        Returns a dictionary containing the calculations (aggregation)
+        over the current queryset.
+
+        If args are present, each expression is passed as a kwarg using
+        the Aggregate object's default alias.
+ """
+ if self.query.distinct_fields:
+ raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
+ for arg in args:
+ kwargs[arg.default_alias] = arg
+
+ query = self.query.clone()
+
+ for (alias, aggregate_expr) in kwargs.items():
+ query.add_aggregate(aggregate_expr, self.model, alias,
+ is_summary=True)
+
+ return query.get_aggregation(using=self.db)
+
+ def count(self):
+ """
+ Performs a SELECT COUNT() and returns the number of records as an
+ integer.
+
+ If the QuerySet is already fully cached this simply returns the length
+ of the cached results set to avoid multiple SELECT COUNT(*) calls.
+ """
+ if self._result_cache is not None:
+ return len(self._result_cache)
+
+ return self.query.get_count(using=self.db)
+
+ def get(self, *args, **kwargs):
+ """
+ Performs the query and returns a single object matching the given
+ keyword arguments.
+ """
+ clone = self.filter(*args, **kwargs)
+ if self.query.can_filter():
+ clone = clone.order_by()
+ num = len(clone)
+ if num == 1:
+ return clone._result_cache[0]
+ if not num:
+ raise self.model.DoesNotExist(
+ "%s matching query does not exist." %
+ self.model._meta.object_name)
+ raise self.model.MultipleObjectsReturned(
+ "get() returned more than one %s -- it returned %s!" %
+ (self.model._meta.object_name, num))
+
+ def create(self, **kwargs):
+ """
+ Creates a new object with the given kwargs, saving it to the database
+ and returning the created object.
+ """
+ obj = self.model(**kwargs)
+ self._for_write = True
+ obj.save(force_insert=True, using=self.db)
+ return obj
+
+ def bulk_create(self, objs, batch_size=None):
+ """
+ Inserts each of the instances into the database. This does *not* call
+ save() on each of the instances, does not send any pre/post save
+ signals, and does not set the primary key attribute if it is an
+ autoincrement field.
+ """
+ # So this case is fun. When you bulk insert you don't get the primary
+ # keys back (if it's an autoincrement), so you can't insert into the
+        # child tables which reference this. There are two workarounds: 1)
+ # this could be implemented if you didn't have an autoincrement pk,
+ # and 2) you could do it by doing O(n) normal inserts into the parent
+ # tables to get the primary keys back, and then doing a single bulk
+ # insert into the childmost table. Some databases might allow doing
+ # this by using RETURNING clause for the insert query. We're punting
+ # on these for now because they are relatively rare cases.
+ assert batch_size is None or batch_size > 0
+ if self.model._meta.parents:
+ raise ValueError("Can't bulk create an inherited model")
+ if not objs:
+ return objs
+ self._for_write = True
+ connection = connections[self.db]
+ fields = self.model._meta.local_concrete_fields
+ with transaction.commit_on_success_unless_managed(using=self.db):
+ if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
+ and self.model._meta.has_auto_field):
+ self._batched_insert(objs, fields, batch_size)
+ else:
+ objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
+ if objs_with_pk:
+ self._batched_insert(objs_with_pk, fields, batch_size)
+ if objs_without_pk:
+                    fields = [f for f in fields if not isinstance(f, AutoField)]
+ self._batched_insert(objs_without_pk, fields, batch_size)
+
+ return objs
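+
+    # Usage sketch (Entry is illustrative): a single bulk INSERT, with no
+    # save() calls, no pre/post save signals and, on most backends, no
+    # primary keys set on the returned objects:
+    #
+    #     Entry.objects.bulk_create([
+    #         Entry(headline='This is a test'),
+    #         Entry(headline='This is only a test'),
+    #     ], batch_size=100)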
+
+ def get_or_create(self, **kwargs):
+ """
+ Looks up an object with the given kwargs, creating one if necessary.
+ Returns a tuple of (object, created), where created is a boolean
+ specifying whether an object was created.
+ """
+ defaults = kwargs.pop('defaults', {})
+ lookup = kwargs.copy()
+ for f in self.model._meta.fields:
+ if f.attname in lookup:
+ lookup[f.name] = lookup.pop(f.attname)
+ try:
+ self._for_write = True
+ return self.get(**lookup), False
+ except self.model.DoesNotExist:
+ try:
+ params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
+ params.update(defaults)
+ obj = self.model(**params)
+ with transaction.atomic(using=self.db):
+ obj.save(force_insert=True, using=self.db)
+ return obj, True
+ except DatabaseError:
+ exc_info = sys.exc_info()
+ try:
+ return self.get(**lookup), False
+ except self.model.DoesNotExist:
+ # Re-raise the DatabaseError with its original traceback.
+ six.reraise(*exc_info)
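+
+    # Usage sketch (Person and its fields are illustrative):
+    #
+    #     from datetime import date
+    #     obj, created = Person.objects.get_or_create(
+    #         first_name='John', last_name='Lennon',
+    #         defaults={'birthday': date(1940, 10, 9)})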
+
+ def _earliest_or_latest(self, field_name=None, direction="-"):
+ """
+ Returns the latest object, according to the model's
+ 'get_latest_by' option or optional given field_name.
+ """
+ order_by = field_name or getattr(self.model._meta, 'get_latest_by')
+ assert bool(order_by), "earliest() and latest() require either a "\
+ "field_name parameter or 'get_latest_by' in the model"
+ assert self.query.can_filter(), \
+ "Cannot change a query once a slice has been taken."
+ obj = self._clone()
+ obj.query.set_limits(high=1)
+ obj.query.clear_ordering(force_empty=True)
+ obj.query.add_ordering('%s%s' % (direction, order_by))
+ return obj.get()
+
+ def earliest(self, field_name=None):
+ return self._earliest_or_latest(field_name=field_name, direction="")
+
+ def latest(self, field_name=None):
+ return self._earliest_or_latest(field_name=field_name, direction="-")
+
+ def first(self):
+ """
+        Returns the first object of a query, or None if no match is found.
+ """
+ qs = self if self.ordered else self.order_by('pk')
+ try:
+ return qs[0]
+ except IndexError:
+ return None
+
+ def last(self):
+ """
+        Returns the last object of a query, or None if no match is found.
+ """
+ qs = self.reverse() if self.ordered else self.order_by('-pk')
+ try:
+ return qs[0]
+ except IndexError:
+ return None
+
+ def in_bulk(self, id_list):
+ """
+ Returns a dictionary mapping each of the given IDs to the object with
+ that ID.
+ """
+ assert self.query.can_filter(), \
+ "Cannot use 'limit' or 'offset' with in_bulk"
+ if not id_list:
+ return {}
+ qs = self.filter(pk__in=id_list).order_by()
+ return dict([(obj._get_pk_val(), obj) for obj in qs])
+
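+ # For example, with a hypothetical Blog model, in_bulk([1, 2]) would
+ # return {1: <Blog pk=1>, 2: <Blog pk=2>}, and in_bulk([]) returns {}.
+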
+ def delete(self):
+ """
+ Deletes the records in the current QuerySet.
+ """
+ assert self.query.can_filter(), \
+ "Cannot use 'limit' or 'offset' with delete."
+
+ del_query = self._clone()
+
+ # The delete is actually 2 queries - one to find related objects,
+ # and one to delete. Make sure that the discovery of related
+ # objects is performed on the same database as the deletion.
+ del_query._for_write = True
+
+ # Disable non-supported fields.
+ del_query.query.select_for_update = False
+ del_query.query.select_related = False
+ del_query.query.clear_ordering(force_empty=True)
+
+ collector = Collector(using=del_query.db)
+ collector.collect(del_query)
+ collector.delete()
+
+ # Clear the result cache, in case this QuerySet gets reused.
+ self._result_cache = None
+ delete.alters_data = True
+
+ def _raw_delete(self, using):
+ """
+ Deletes objects found from the given queryset in single direct SQL
+ query. No signals are sent, and there is no protection for cascades.
+ """
+ sql.DeleteQuery(self.model).delete_qs(self, using)
+ _raw_delete.alters_data = True
+
+ def update(self, **kwargs):
+ """
+ Updates all elements in the current QuerySet, setting all the given
+ fields to the appropriate values.
+ """
+ assert self.query.can_filter(), \
+ "Cannot update a query once a slice has been taken."
+ self._for_write = True
+ query = self.query.clone(sql.UpdateQuery)
+ query.add_update_values(kwargs)
+ with transaction.commit_on_success_unless_managed(using=self.db):
+ rows = query.get_compiler(self.db).execute_sql(None)
+ self._result_cache = None
+ return rows
+ update.alters_data = True
+
+ def _update(self, values):
+ """
+ A version of update that accepts field objects instead of field names.
+ Used primarily for model saving and not intended for use by general
+ code (it requires too much poking around at model internals to be
+ useful at that level).
+ """
+ assert self.query.can_filter(), \
+ "Cannot update a query once a slice has been taken."
+ query = self.query.clone(sql.UpdateQuery)
+ query.add_update_fields(values)
+ self._result_cache = None
+ return query.get_compiler(self.db).execute_sql(None)
+ _update.alters_data = True
+
+ def exists(self):
+ if self._result_cache is None:
+ return self.query.has_results(using=self.db)
+ return bool(self._result_cache)
+
+ def _prefetch_related_objects(self):
+ # This method can only be called once the result cache has been filled.
+ prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
+ self._prefetch_done = True
+
+ ##################################################
+ # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
+ ##################################################
+
+ def values(self, *fields):
+ return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
+
+ def values_list(self, *fields, **kwargs):
+ flat = kwargs.pop('flat', False)
+ if kwargs:
+ raise TypeError('Unexpected keyword arguments to values_list: %s'
+ % (list(kwargs),))
+ if flat and len(fields) > 1:
+ raise TypeError("'flat' is not valid when values_list is called with more than one field.")
+ return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
+ _fields=fields)
+
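+ # A sketch of the two result shapes, for a hypothetical Entry model:
+ #
+ #     Entry.objects.values('id', 'headline')
+ #     # -> iterates as {'id': 1, 'headline': 'First'}, ...
+ #     Entry.objects.values_list('id', flat=True)
+ #     # -> iterates as 1, 2, 3, ...
+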
+ def dates(self, field_name, kind, order='ASC'):
+ """
+ Returns a list of date objects representing all available dates for
+ the given field_name, scoped to 'kind'.
+ """
+ assert kind in ("year", "month", "day"), \
+ "'kind' must be one of 'year', 'month' or 'day'."
+ assert order in ('ASC', 'DESC'), \
+ "'order' must be either 'ASC' or 'DESC'."
+ return self._clone(klass=DateQuerySet, setup=True,
+ _field_name=field_name, _kind=kind, _order=order)
+
+ def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
+ """
+ Returns a list of datetime objects representing all available
+ datetimes for the given field_name, scoped to 'kind'.
+ """
+ assert kind in ("year", "month", "day", "hour", "minute", "second"), \
+ "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
+ assert order in ('ASC', 'DESC'), \
+ "'order' must be either 'ASC' or 'DESC'."
+ if settings.USE_TZ:
+ if tzinfo is None:
+ tzinfo = timezone.get_current_timezone()
+ else:
+ tzinfo = None
+ return self._clone(klass=DateTimeQuerySet, setup=True,
+ _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)
+
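+ # Sketch of the tzinfo handling above, for a hypothetical Entry model:
+ # with USE_TZ enabled, values are truncated in the current timezone
+ # unless one is passed explicitly:
+ #
+ #     Entry.objects.datetimes('pub_date', 'month', order='DESC')
+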
+ def none(self):
+ """
+ Returns an empty QuerySet.
+ """
+ clone = self._clone()
+ clone.query.set_empty()
+ return clone
+
+ ##################################################################
+ # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
+ ##################################################################
+
+ def all(self):
+ """
+ Returns a new QuerySet that is a copy of the current one. This allows a
+ QuerySet to proxy for a model manager in some cases.
+ """
+ return self._clone()
+
+ def filter(self, *args, **kwargs):
+ """
+ Returns a new QuerySet instance with the args ANDed to the existing
+ set.
+ """
+ return self._filter_or_exclude(False, *args, **kwargs)
+
+ def exclude(self, *args, **kwargs):
+ """
+ Returns a new QuerySet instance with NOT (args) ANDed to the existing
+ set.
+ """
+ return self._filter_or_exclude(True, *args, **kwargs)
+
+ def _filter_or_exclude(self, negate, *args, **kwargs):
+ if args or kwargs:
+ assert self.query.can_filter(), \
+ "Cannot filter a query once a slice has been taken."
+
+ clone = self._clone()
+ if negate:
+ clone.query.add_q(~Q(*args, **kwargs))
+ else:
+ clone.query.add_q(Q(*args, **kwargs))
+ return clone
+
+ def complex_filter(self, filter_obj):
+ """
+ Returns a new QuerySet instance with filter_obj added to the filters.
+
+ filter_obj can be a Q object (or anything with an add_to_query()
+ method) or a dictionary of keyword lookup arguments.
+
+ This exists to support framework features such as 'limit_choices_to',
+ and usually it will be more natural to use other methods.
+ """
+ if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
+ clone = self._clone()
+ clone.query.add_q(filter_obj)
+ return clone
+ else:
+ return self._filter_or_exclude(None, **filter_obj)
+
+ def select_for_update(self, **kwargs):
+ """
+ Returns a new QuerySet instance that will select objects with a
+ FOR UPDATE lock.
+ """
+ # Default to false for nowait
+ nowait = kwargs.pop('nowait', False)
+ obj = self._clone()
+ obj._for_write = True
+ obj.query.select_for_update = True
+ obj.query.select_for_update_nowait = nowait
+ return obj
+
+ def select_related(self, *fields, **kwargs):
+ """
+ Returns a new QuerySet instance that will select related objects.
+
+ If fields are specified, they must be ForeignKey fields and only those
+ related objects are included in the selection.
+
+ If select_related(None) is called, the list is cleared.
+ """
+ if 'depth' in kwargs:
+ warnings.warn('The "depth" keyword argument has been deprecated.\n'
+ 'Use related field names instead.', DeprecationWarning, stacklevel=2)
+ depth = kwargs.pop('depth', 0)
+ if kwargs:
+ raise TypeError('Unexpected keyword arguments to select_related: %s'
+ % (list(kwargs),))
+ obj = self._clone()
+ if fields == (None,):
+ obj.query.select_related = False
+ elif fields:
+ if depth:
+ raise TypeError('Cannot pass both "depth" and fields to select_related()')
+ obj.query.add_select_related(fields)
+ else:
+ obj.query.select_related = True
+ if depth:
+ obj.query.max_depth = depth
+ return obj
+
+ def prefetch_related(self, *lookups):
+ """
+ Returns a new QuerySet instance that will prefetch the specified
+ Many-To-One and Many-To-Many related objects when the QuerySet is
+ evaluated.
+
+ When prefetch_related() is called more than once, the list of lookups to
+ prefetch is appended to. If prefetch_related(None) is called, the
+ list is cleared.
+ """
+ clone = self._clone()
+ if lookups == (None,):
+ clone._prefetch_related_lookups = []
+ else:
+ clone._prefetch_related_lookups.extend(lookups)
+ return clone
+
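+ # Sketch of lookup accumulation and reset (models hypothetical):
+ #
+ #     qs = Pizza.objects.prefetch_related('toppings')
+ #     qs = qs.prefetch_related('restaurants')  # both are now prefetched
+ #     qs = qs.prefetch_related(None)           # clears the lookup list
+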
+ def annotate(self, *args, **kwargs):
+ """
+ Return a query set in which the returned objects have been annotated
+ with data aggregated from related fields.
+ """
+ for arg in args:
+ if arg.default_alias in kwargs:
+ raise ValueError("The named annotation '%s' conflicts with the "
+ "default name for another annotation."
+ % arg.default_alias)
+ kwargs[arg.default_alias] = arg
+
+ names = getattr(self, '_fields', None)
+ if names is None:
+ names = set(self.model._meta.get_all_field_names())
+ for aggregate in kwargs:
+ if aggregate in names:
+ raise ValueError("The annotation '%s' conflicts with a field on "
+ "the model." % aggregate)
+
+ obj = self._clone()
+
+ obj._setup_aggregate_query(list(kwargs))
+
+ # Add the aggregates to the query
+ for (alias, aggregate_expr) in kwargs.items():
+ obj.query.add_aggregate(aggregate_expr, self.model, alias,
+ is_summary=False)
+
+ return obj
+
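+ # Sketch of the alias handling above ("Book"/"authors" hypothetical):
+ #
+ #     Book.objects.annotate(Count('authors'))            # alias 'authors__count'
+ #     Book.objects.annotate(n_authors=Count('authors'))  # explicit alias
+ #
+ # A name that collides with another annotation's default alias or with
+ # a model field raises ValueError, as implemented above.
+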
+ def order_by(self, *field_names):
+ """
+ Returns a new QuerySet instance with the ordering changed.
+ """
+ assert self.query.can_filter(), \
+ "Cannot reorder a query once a slice has been taken."
+ obj = self._clone()
+ obj.query.clear_ordering(force_empty=False)
+ obj.query.add_ordering(*field_names)
+ return obj
+
+ def distinct(self, *field_names):
+ """
+ Returns a new QuerySet instance that will select only distinct results.
+ """
+ assert self.query.can_filter(), \
+ "Cannot create distinct fields once a slice has been taken."
+ obj = self._clone()
+ obj.query.add_distinct_fields(*field_names)
+ return obj
+
+ def extra(self, select=None, where=None, params=None, tables=None,
+ order_by=None, select_params=None):
+ """
+ Adds extra SQL fragments to the query.
+ """
+ assert self.query.can_filter(), \
+ "Cannot change a query once a slice has been taken"
+ clone = self._clone()
+ clone.query.add_extra(select, select_params, where, params, tables, order_by)
+ return clone
+
+ def reverse(self):
+ """
+ Reverses the ordering of the QuerySet.
+ """
+ clone = self._clone()
+ clone.query.standard_ordering = not clone.query.standard_ordering
+ return clone
+
+ def defer(self, *fields):
+ """
+ Defers the loading of data for certain fields until they are accessed.
+ The set of fields to defer is added to any existing set of deferred
+ fields. The only exception to this is if None is passed in as the only
+ parameter, in which case all deferrals are removed (None acts as a
+ reset option).
+ """
+ clone = self._clone()
+ if fields == (None,):
+ clone.query.clear_deferred_loading()
+ else:
+ clone.query.add_deferred_loading(fields)
+ return clone
+
+ def only(self, *fields):
+ """
+ Essentially, the opposite of defer. Only the fields passed into this
+ method and that are not already specified as deferred are loaded
+ immediately when the queryset is evaluated.
+ """
+ if fields == (None,):
+ # Can only pass None to defer(), not only(), as the reset option.
+ # That won't stop people trying to do this, so let's be explicit.
+ raise TypeError("Cannot pass None as an argument to only().")
+ clone = self._clone()
+ clone.query.add_immediate_loading(fields)
+ return clone
+
+ def using(self, alias):
+ """
+ Selects which database this QuerySet should execute its query against.
+ """
+ clone = self._clone()
+ clone._db = alias
+ return clone
+
+ ###################################
+ # PUBLIC INTROSPECTION ATTRIBUTES #
+ ###################################
+
+ def ordered(self):
+ """
+ Returns True if the QuerySet is ordered -- i.e. has an order_by()
+ clause or a default ordering on the model.
+ """
+ if self.query.extra_order_by or self.query.order_by:
+ return True
+ elif self.query.default_ordering and self.query.get_meta().ordering:
+ return True
+ else:
+ return False
+ ordered = property(ordered)
+
+ @property
+ def db(self):
+ "Return the database that will be used if this query is executed now"
+ if self._for_write:
+ return self._db or router.db_for_write(self.model)
+ return self._db or router.db_for_read(self.model)
+
+ ###################
+ # PRIVATE METHODS #
+ ###################
+ def _batched_insert(self, objs, fields, batch_size):
+ """
+ A helper method for bulk_create() that inserts the objects one batch
+ at a time: it slices batch_size objects off the front of the list and
+ issues one INSERT per slice.
+ """
+ if not objs:
+ return
+ ops = connections[self.db].ops
+ batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
+ for batch in [objs[i:i+batch_size]
+ for i in range(0, len(objs), batch_size)]:
+ self.model._base_manager._insert(batch, fields=fields,
+ using=self.db)
+
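+ # The slicing above is equivalent to this standalone sketch:
+ #
+ #     def batches(objs, batch_size):
+ #         for i in range(0, len(objs), batch_size):
+ #             yield objs[i:i + batch_size]
+ #
+ # e.g. list(batches([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]].
+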
+ def _clone(self, klass=None, setup=False, **kwargs):
+ if klass is None:
+ klass = self.__class__
+ query = self.query.clone()
+ if self._sticky_filter:
+ query.filter_is_sticky = True
+ c = klass(model=self.model, query=query, using=self._db)
+ c._for_write = self._for_write
+ c._prefetch_related_lookups = self._prefetch_related_lookups[:]
+ c._known_related_objects = self._known_related_objects
+ c.__dict__.update(kwargs)
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+ def _fetch_all(self):
+ if self._result_cache is None:
+ self._result_cache = list(self.iterator())
+ if self._prefetch_related_lookups and not self._prefetch_done:
+ self._prefetch_related_objects()
+
+ def _next_is_sticky(self):
+ """
+ Indicates that the next filter call and the one following that should
+ be treated as a single filter. This is only important when it comes to
+ determining when to reuse tables for many-to-many filters. Required so
+ that we can filter naturally on the results of related managers.
+
+ This doesn't return a clone of the current QuerySet (it returns
+ "self"). The method is only used internally and should be immediately
+ followed by a filter() that does create a clone.
+ """
+ self._sticky_filter = True
+ return self
+
+ def _merge_sanity_check(self, other):
+ """
+ Checks that we are merging two comparable QuerySet classes. By default
+ this does nothing, but see the ValuesQuerySet for an example of where
+ it's useful.
+ """
+ pass
+
+ def _merge_known_related_objects(self, other):
+ """
+ Keep track of all known related objects from either QuerySet instance.
+ """
+ for field, objects in other._known_related_objects.items():
+ self._known_related_objects.setdefault(field, {}).update(objects)
+
+ def _setup_aggregate_query(self, aggregates):
+ """
+ Prepare the query for computing a result that contains aggregate annotations.
+ """
+ opts = self.model._meta
+ if self.query.group_by is None:
+ field_names = [f.attname for f in opts.concrete_fields]
+ self.query.add_fields(field_names, False)
+ self.query.set_group_by()
+
+ def _prepare(self):
+ return self
+
+ def _as_sql(self, connection):
+ """
+ Returns the internal query's SQL and parameters (as a tuple).
+ """
+ obj = self.values("pk")
+ if obj._db is None or connection == connections[obj._db]:
+ return obj.query.get_compiler(connection=connection).as_nested_sql()
+ raise ValueError("Can't do subqueries with queries on different DBs.")
+
+ # When used as part of a nested query, a queryset will never be an "always
+ # empty" result.
+ value_annotation = True
+
+class InstanceCheckMeta(type):
+ def __instancecheck__(self, instance):
+ return instance.query.is_empty()
+
+class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
+ """
+ Marker class usable for checking if a queryset is empty by .none():
+ isinstance(qs.none(), EmptyQuerySet) -> True
+ """
+
+ def __init__(self, *args, **kwargs):
+ raise TypeError("EmptyQuerySet can't be instantiated")
+
+class ValuesQuerySet(QuerySet):
+ def __init__(self, *args, **kwargs):
+ super(ValuesQuerySet, self).__init__(*args, **kwargs)
+ # select_related isn't supported in values(). (FIXME #3358)
+ self.query.select_related = False
+
+ # QuerySet.clone() will also set up the _fields attribute with the
+ # names of the model fields to select.
+
+ def iterator(self):
+ # Purge any extra columns that haven't been explicitly asked for
+ extra_names = list(self.query.extra_select)
+ field_names = self.field_names
+ aggregate_names = list(self.query.aggregate_select)
+
+ names = extra_names + field_names + aggregate_names
+
+ for row in self.query.get_compiler(self.db).results_iter():
+ yield dict(zip(names, row))
+
+ def delete(self):
+ # values().delete() doesn't work currently - make sure it raises a
+ # user-friendly error.
+ raise TypeError("Queries with .values() or .values_list() applied "
+ "can't be deleted")
+
+ def _setup_query(self):
+ """
+ Constructs the field_names list that the values query will be
+ retrieving.
+
+ Called by the _clone() method after initializing the rest of the
+ instance.
+ """
+ self.query.clear_deferred_loading()
+ self.query.clear_select_fields()
+
+ if self._fields:
+ self.extra_names = []
+ self.aggregate_names = []
+ if not self.query.extra and not self.query.aggregates:
+ # Short cut - if there are no extra or aggregates, then
+ # the values() clause must be just field names.
+ self.field_names = list(self._fields)
+ else:
+ self.query.default_cols = False
+ self.field_names = []
+ for f in self._fields:
+ # We inspect the full extra_select list since we might
+ # be adding back an extra select item that we hadn't
+ # selected previously.
+ if f in self.query.extra:
+ self.extra_names.append(f)
+ elif f in self.query.aggregate_select:
+ self.aggregate_names.append(f)
+ else:
+ self.field_names.append(f)
+ else:
+ # Default to all fields.
+ self.extra_names = None
+ self.field_names = [f.attname for f in self.model._meta.concrete_fields]
+ self.aggregate_names = None
+
+ self.query.select = []
+ if self.extra_names is not None:
+ self.query.set_extra_mask(self.extra_names)
+ self.query.add_fields(self.field_names, True)
+ if self.aggregate_names is not None:
+ self.query.set_aggregate_mask(self.aggregate_names)
+
+ def _clone(self, klass=None, setup=False, **kwargs):
+ """
+ Cloning a ValuesQuerySet preserves the current fields.
+ """
+ c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
+ if not hasattr(c, '_fields'):
+ # Only clone self._fields if _fields wasn't passed into the cloning
+ # call directly.
+ c._fields = self._fields[:]
+ c.field_names = self.field_names
+ c.extra_names = self.extra_names
+ c.aggregate_names = self.aggregate_names
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+ def _merge_sanity_check(self, other):
+ super(ValuesQuerySet, self)._merge_sanity_check(other)
+ if (set(self.extra_names) != set(other.extra_names) or
+ set(self.field_names) != set(other.field_names) or
+ self.aggregate_names != other.aggregate_names):
+ raise TypeError("Merging '%s' classes must involve the same values in each case."
+ % self.__class__.__name__)
+
+ def _setup_aggregate_query(self, aggregates):
+ """
+ Prepare the query for computing a result that contains aggregate annotations.
+ """
+ self.query.set_group_by()
+
+ if self.aggregate_names is not None:
+ self.aggregate_names.extend(aggregates)
+ self.query.set_aggregate_mask(self.aggregate_names)
+
+ super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
+
+ def _as_sql(self, connection):
+ """
+ A ValuesQuerySet (and subclasses like ValuesListQuerySet) can only be
+ used as a nested query if it is already set up to select only a
+ single field (in which case, that is the field column that is
+ returned). This differs from QuerySet.as_sql(), where the column to
+ select is set up by Django.
+ """
+ if ((self._fields and len(self._fields) > 1) or
+ (not self._fields and len(self.model._meta.fields) > 1)):
+ raise TypeError('Cannot use a multi-field %s as a filter value.'
+ % self.__class__.__name__)
+
+ obj = self._clone()
+ if obj._db is None or connection == connections[obj._db]:
+ return obj.query.get_compiler(connection=connection).as_nested_sql()
+ raise ValueError("Can't do subqueries with queries on different DBs.")
+
+ def _prepare(self):
+ """
+ Validates that we aren't trying to do a query like
+ value__in=qs.values('value1', 'value2'), which isn't valid.
+ """
+ if ((self._fields and len(self._fields) > 1) or
+ (not self._fields and len(self.model._meta.fields) > 1)):
+ raise TypeError('Cannot use a multi-field %s as a filter value.'
+ % self.__class__.__name__)
+ return self
+
+
+class ValuesListQuerySet(ValuesQuerySet):
+ def iterator(self):
+ if self.flat and len(self._fields) == 1:
+ for row in self.query.get_compiler(self.db).results_iter():
+ yield row[0]
+ elif not self.query.extra_select and not self.query.aggregate_select:
+ for row in self.query.get_compiler(self.db).results_iter():
+ yield tuple(row)
+ else:
+ # When extra(select=...) or an annotation is involved, the extra
+ # cols are always at the start of the row, and we need to reorder
+ # the fields to match the order in self._fields.
+ extra_names = list(self.query.extra_select)
+ field_names = self.field_names
+ aggregate_names = list(self.query.aggregate_select)
+
+ names = extra_names + field_names + aggregate_names
+
+ # If a field list has been specified, use it. Otherwise, use the
+ # full list of fields, including extras and aggregates.
+ if self._fields:
+ fields = list(self._fields) + [f for f in aggregate_names if f not in self._fields]
+ else:
+ fields = names
+
+ for row in self.query.get_compiler(self.db).results_iter():
+ data = dict(zip(names, row))
+ yield tuple([data[f] for f in fields])
+
+ def _clone(self, *args, **kwargs):
+ clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
+ if not hasattr(clone, "flat"):
+ # Only assign flat if the clone didn't already get it from kwargs
+ clone.flat = self.flat
+ return clone
+
+
+class DateQuerySet(QuerySet):
+ def iterator(self):
+ return self.query.get_compiler(self.db).results_iter()
+
+ def _setup_query(self):
+ """
+ Sets up any special features of the query attribute.
+
+ Called by the _clone() method after initializing the rest of the
+ instance.
+ """
+ self.query.clear_deferred_loading()
+ self.query = self.query.clone(klass=sql.DateQuery, setup=True)
+ self.query.select = []
+ self.query.add_select(self._field_name, self._kind, self._order)
+
+ def _clone(self, klass=None, setup=False, **kwargs):
+ c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
+ c._field_name = self._field_name
+ c._kind = self._kind
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+
+class DateTimeQuerySet(QuerySet):
+ def iterator(self):
+ return self.query.get_compiler(self.db).results_iter()
+
+ def _setup_query(self):
+ """
+ Sets up any special features of the query attribute.
+
+ Called by the _clone() method after initializing the rest of the
+ instance.
+ """
+ self.query.clear_deferred_loading()
+ self.query = self.query.clone(klass=sql.DateTimeQuery, setup=True, tzinfo=self._tzinfo)
+ self.query.select = []
+ self.query.add_select(self._field_name, self._kind, self._order)
+
+ def _clone(self, klass=None, setup=False, **kwargs):
+ c = super(DateTimeQuerySet, self)._clone(klass, False, **kwargs)
+ c._field_name = self._field_name
+ c._kind = self._kind
+ c._tzinfo = self._tzinfo
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+
+def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
+ only_load=None, from_parent=None):
+ """
+ Helper function that recursively returns information for a klass, to be
+ used in get_cached_row. It exists to compute this information only once
+ for the entire queryset instead of once per row, which would lead to
+ poor performance on large querysets.
+
+ Arguments:
+ * klass - the class to retrieve (and instantiate)
+ * max_depth - the maximum depth to which a select_related()
+ relationship should be explored.
+ * cur_depth - the current depth in the select_related() tree.
+ Used in recursive calls to determine if we should dig deeper.
+ * requested - A dictionary describing the select_related() tree
+ that is to be retrieved. Keys are field names; values are
+ dictionaries describing the keys on that related object that
+ are themselves to be select_related().
+ * only_load - if the query has had only() or defer() applied,
+ this is the list of field names that will be returned. If None,
+ the full field list for `klass` can be assumed.
+ * from_parent - the parent model used to get to this model
+
+ Note that when travelling from parent to child, we will only load child
+ fields which aren't in the parent.
+ """
+ if max_depth and requested is None and cur_depth > max_depth:
+ # We've recursed deeply enough; stop now.
+ return None
+
+ if only_load:
+ load_fields = only_load.get(klass) or set()
+ # When we create the object, we will also be populating
+ # all the parent classes, so traverse the parent classes looking
+ # for fields that must be included on load.
+ for parent in klass._meta.get_parent_list():
+ fields = only_load.get(parent)
+ if fields:
+ load_fields.update(fields)
+ else:
+ load_fields = None
+
+ if load_fields:
+ # Handle deferred fields.
+ skip = set()
+ init_list = []
+ # Build the list of fields that *haven't* been requested
+ for field, model in klass._meta.get_concrete_fields_with_model():
+ if field.name not in load_fields:
+ skip.add(field.attname)
+ elif from_parent and issubclass(from_parent, model.__class__):
+ # Avoid loading fields already loaded for parent model for
+ # child models.
+ continue
+ else:
+ init_list.append(field.attname)
+ # Retrieve all the requested fields
+ field_count = len(init_list)
+ if skip:
+ klass = deferred_class_factory(klass, skip)
+ field_names = init_list
+ else:
+ field_names = ()
+ else:
+ # Load all fields on klass
+
+ field_count = len(klass._meta.concrete_fields)
+ # Check if we need to skip some parent fields.
+ if from_parent and len(klass._meta.local_concrete_fields) != len(klass._meta.concrete_fields):
+ # Only load those fields which haven't been already loaded into
+ # 'from_parent'.
+ non_seen_models = [p for p in klass._meta.get_parent_list()
+ if not issubclass(from_parent, p)]
+ # Load local fields, too...
+ non_seen_models.append(klass)
+ field_names = [f.attname for f in klass._meta.concrete_fields
+ if f.model in non_seen_models]
+ field_count = len(field_names)
+ # Try to avoid populating the field_names variable for performance
+ # reasons: if field_names is set, we use **kwargs based model init,
+ # which is slower than positional init.
+ if field_count == len(klass._meta.concrete_fields):
+ field_names = ()
+
+ restricted = requested is not None
+
+ related_fields = []
+ for f in klass._meta.fields:
+ if select_related_descend(f, restricted, requested, load_fields):
+ if restricted:
+ next = requested[f.name]
+ else:
+ next = None
+ klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1,
+ requested=next, only_load=only_load)
+ related_fields.append((f, klass_info))
+
+ reverse_related_fields = []
+ if restricted:
+ for o in klass._meta.get_all_related_objects():
+ if o.field.unique and select_related_descend(o.field, restricted, requested,
+ only_load.get(o.model), reverse=True):
+ next = requested[o.field.related_query_name()]
+ parent = klass if issubclass(o.model, klass) else None
+ klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1,
+ requested=next, only_load=only_load, from_parent=parent)
+ reverse_related_fields.append((o.field, klass_info))
+ if field_names:
+ pk_idx = field_names.index(klass._meta.pk.attname)
+ else:
+ pk_idx = klass._meta.pk_index()
+
+ return klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx
+
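+# Sketch of the value returned above, for a hypothetical Book model with a
+# select_related() foreign key to Author:
+#
+#     (Book,                # klass (possibly a deferred subclass)
+#      (),                  # field_names (empty -> positional *args init)
+#      4,                   # field_count: row columns consumed by Book
+#      [(author_field, <klass_info for Author>)],  # related_fields
+#      [],                  # reverse_related_fields
+#      0)                   # pk_idx: position of the pk column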
+
+def get_cached_row(row, index_start, using, klass_info, offset=0,
+ parent_data=()):
+ """
+ Helper function that recursively returns an object with the specified
+ related attributes already populated.
+
+ This method may be called recursively to populate deep select_related()
+ clauses.
+
+ Arguments:
+ * row - the row of data returned by the database cursor
+ * index_start - the index of the row at which data for this
+ object is known to start
+ * offset - the number of additional fields that are known to
+ exist in row for `klass`. This usually means the number of
+ annotated results on `klass`.
+ * using - the database alias on which the query is being executed.
+ * klass_info - result of the get_klass_info function
+ * parent_data - parent model data in format (field, value). Used
+ to populate the non-local fields of child models.
+ """
+ if klass_info is None:
+ return None
+ klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx = klass_info
+
+ fields = row[index_start : index_start + field_count]
+ # If the pk column is None (or the Oracle equivalent ''), then the related
+ # object must be non-existent - set the relation to None.
+ if fields[pk_idx] is None or fields[pk_idx] == '':
+ obj = None
+ elif field_names:
+ fields = list(fields)
+ for rel_field, value in parent_data:
+ field_names.append(rel_field.attname)
+ fields.append(value)
+ obj = klass(**dict(zip(field_names, fields)))
+ else:
+ obj = klass(*fields)
+ # If an object was retrieved, set the database state.
+ if obj:
+ obj._state.db = using
+ obj._state.adding = False
+
+ # Instantiate related fields
+ index_end = index_start + field_count + offset
+ # Iterate over each related object, populating any
+ # select_related() fields
+ for f, klass_info in related_fields:
+ # Recursively retrieve the data for the related object
+ cached_row = get_cached_row(row, index_end, using, klass_info)
+ # If the recursive descent found an object, populate the
+ # descriptor caches relevant to the object
+ if cached_row:
+ rel_obj, index_end = cached_row
+ if obj is not None:
+ # If the base object exists, populate the
+ # descriptor cache
+ setattr(obj, f.get_cache_name(), rel_obj)
+ if f.unique and rel_obj is not None:
+ # If the field is unique, populate the
+ # reverse descriptor cache on the related object
+ setattr(rel_obj, f.related.get_cache_name(), obj)
+
+ # Now do the same, but for reverse related objects.
+ # Only handle the restricted case - i.e., don't do a depth
+ # descent into reverse relations unless explicitly requested
+ for f, klass_info in reverse_related_fields:
+ # Transfer data from this object to its children.
+ parent_data = []
+ for rel_field, rel_model in klass_info[0]._meta.get_fields_with_model():
+ if rel_model is not None and isinstance(obj, rel_model):
+ parent_data.append((rel_field, getattr(obj, rel_field.attname)))
+ # Recursively retrieve the data for the related object
+ cached_row = get_cached_row(row, index_end, using, klass_info,
+ parent_data=parent_data)
+ # If the recursive descent found an object, populate the
+ # descriptor caches relevant to the object
+ if cached_row:
+ rel_obj, index_end = cached_row
+ if obj is not None:
+ # populate the reverse descriptor cache
+ setattr(obj, f.related.get_cache_name(), rel_obj)
+ if rel_obj is not None:
+ # If the related object exists, populate
+ # the descriptor cache.
+ setattr(rel_obj, f.get_cache_name(), obj)
+ # Populate related object caches using parent data.
+ for rel_field, _ in parent_data:
+ if rel_field.rel:
+ setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
+ try:
+ cached_obj = getattr(obj, rel_field.get_cache_name())
+ setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
+ except AttributeError:
+ # Related object hasn't been cached yet
+ pass
+ return obj, index_end
+
+
+class RawQuerySet(object):
+ """
+ Provides an iterator which converts the results of raw SQL queries into
+ annotated model instances.
+ """
+ def __init__(self, raw_query, model=None, query=None, params=None,
+ translations=None, using=None):
+ self.raw_query = raw_query
+ self.model = model
+ self._db = using
+ self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
+ self.params = params or ()
+ self.translations = translations or {}
+
+ def __iter__(self):
+ # Mapping of attrnames to row column positions. Used for constructing
+ # the model using kwargs, needed when not all of the model's fields are
+ # present in the query.
+ model_init_field_names = {}
+ # A list of tuples of (column name, column position). Used for
+ # annotation fields.
+ annotation_fields = []
+
+ # Cache some things for performance reasons outside the loop.
+ db = self.db
+ compiler = connections[db].ops.compiler('SQLCompiler')(
+ self.query, connections[db], db
+ )
+ need_resolve_columns = hasattr(compiler, 'resolve_columns')
+
+ query = iter(self.query)
+
+ # Find out which columns are model fields, and which ones should be
+ # annotated to the model.
+ for pos, column in enumerate(self.columns):
+ if column in self.model_fields:
+ model_init_field_names[self.model_fields[column].attname] = pos
+ else:
+ annotation_fields.append((column, pos))
+
+ # Find out which of the model's fields are not present in the query.
+ skip = set()
+ for field in self.model._meta.fields:
+ if field.attname not in model_init_field_names:
+ skip.add(field.attname)
+ if skip:
+ if self.model._meta.pk.attname in skip:
+ raise InvalidQuery('Raw query must include the primary key')
+ model_cls = deferred_class_factory(self.model, skip)
+ else:
+ model_cls = self.model
+ # All of the model's fields are present in the query. So, it is possible
+ # to use *args based model instantiation. For each field of the model,
+ # record the query column position matching that field.
+ model_init_field_pos = []
+ for field in self.model._meta.fields:
+ model_init_field_pos.append(model_init_field_names[field.attname])
+ if need_resolve_columns:
+ fields = [self.model_fields.get(c, None) for c in self.columns]
+ # Begin looping through the query values.
+ for values in query:
+ if need_resolve_columns:
+ values = compiler.resolve_columns(values, fields)
+ # Associate fields to values
+ if skip:
+ model_init_kwargs = {}
+ for attname, pos in six.iteritems(model_init_field_names):
+ model_init_kwargs[attname] = values[pos]
+ instance = model_cls(**model_init_kwargs)
+ else:
+ model_init_args = [values[pos] for pos in model_init_field_pos]
+ instance = model_cls(*model_init_args)
+ if annotation_fields:
+ for column, pos in annotation_fields:
+ setattr(instance, column, values[pos])
+
+ instance._state.db = db
+ instance._state.adding = False
+
+ yield instance
+
+ def __repr__(self):
+ text = self.raw_query
+ if self.params:
+ text = text % (self.params if hasattr(self.params, 'keys') else tuple(self.params))
+ return "<RawQuerySet: %r>" % text
+
+ def __getitem__(self, k):
+ return list(self)[k]
+
+ @property
+ def db(self):
+ "Return the database that will be used if this query is executed now"
+ return self._db or router.db_for_read(self.model)
+
+ def using(self, alias):
+ """
+ Selects which database this RawQuerySet should execute its query against.
+ """
+ return RawQuerySet(self.raw_query, model=self.model,
+ query=self.query.clone(using=alias),
+ params=self.params, translations=self.translations,
+ using=alias)
+
+ @property
+ def columns(self):
+ """
+ A list of model field names in the order they'll appear in the
+ query results.
+ """
+ if not hasattr(self, '_columns'):
+ self._columns = self.query.get_columns()
+
+ # Adjust any column names which don't match field names
+ for (query_name, model_name) in self.translations.items():
+ try:
+ index = self._columns.index(query_name)
+ self._columns[index] = model_name
+ except ValueError:
+ # Ignore translations for non-existent column names
+ pass
+
+ return self._columns
+
+ @property
+ def model_fields(self):
+ """
+ A dict mapping column names to model field names.
+ """
+ if not hasattr(self, '_model_fields'):
+ converter = connections[self.db].introspection.table_name_converter
+ self._model_fields = {}
+ for field in self.model._meta.fields:
+ name, column = field.get_attname_column()
+ self._model_fields[converter(column)] = field
+ return self._model_fields
+
+
+def insert_query(model, objs, fields, return_id=False, raw=False, using=None):
+ """
+ Inserts a new record for the given model. This provides an interface to
+ the InsertQuery class and is how Model.save() is implemented. It is not
+ part of the public API.
+ """
+ query = sql.InsertQuery(model)
+ query.insert_values(fields, objs, raw=raw)
+ return query.get_compiler(using=using).execute_sql(return_id)
+
+
+def prefetch_related_objects(result_cache, related_lookups):
+ """
+ Helper function for prefetch_related functionality
+
+ Populates prefetched object caches for a list of results
+ from a QuerySet
+ """
+ if len(result_cache) == 0:
+ return # nothing to do
+
+ model = result_cache[0].__class__
+
+ # We need to be able to dynamically add to the list of prefetch_related
+ # lookups that we look up (see below). So we need some bookkeeping to
+ # ensure we don't do duplicate work.
+ done_lookups = set() # set of lookups like foo__bar__baz
+ done_queries = {} # dictionary of things like 'foo__bar': [results]
+
+ auto_lookups = [] # we add to this as we go through.
+ followed_descriptors = set() # recursion protection
+
+ all_lookups = itertools.chain(related_lookups, auto_lookups)
+ for lookup in all_lookups:
+ if lookup in done_lookups:
+ # We've done exactly this already, skip the whole thing
+ continue
+ done_lookups.add(lookup)
+
+ # Top level, the list of objects to decorate is the result cache
+ # from the primary QuerySet. It won't be for deeper levels.
+ obj_list = result_cache
+
+ attrs = lookup.split(LOOKUP_SEP)
+ for level, attr in enumerate(attrs):
+ # Prepare main instances
+ if len(obj_list) == 0:
+ break
+
+ current_lookup = LOOKUP_SEP.join(attrs[0:level+1])
+ if current_lookup in done_queries:
+ # Skip any prefetching, and any object preparation
+ obj_list = done_queries[current_lookup]
+ continue
+
+ # Prepare objects:
+ good_objects = True
+ for obj in obj_list:
+ # Since prefetching can re-use instances, it is possible to have
+ # the same instance multiple times in obj_list, so obj might
+ # already be prepared.
+ if not hasattr(obj, '_prefetched_objects_cache'):
+ try:
+ obj._prefetched_objects_cache = {}
+ except AttributeError:
+ # Must be in a QuerySet subclass that is not returning
+ # Model instances, either in Django or 3rd
+ # party. prefetch_related() doesn't make sense, so quit
+ # now.
+ good_objects = False
+ break
+ if not good_objects:
+ break
+
+ # Descend down tree
+
+ # We assume that objects retrieved are homogeneous (which is the premise
+ # of prefetch_related), so what applies to first object applies to all.
+ first_obj = obj_list[0]
+ prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr)
+
+ if not attr_found:
+ raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
+ "parameter to prefetch_related()" %
+ (attr, first_obj.__class__.__name__, lookup))
+
+ if level == len(attrs) - 1 and prefetcher is None:
+ # Last one, this *must* resolve to something that supports
+ # prefetching, otherwise there is no point adding it and the
+ # developer asking for it has made a mistake.
+ raise ValueError("'%s' does not resolve to a item that supports "
+ "prefetching - this is an invalid parameter to "
+ "prefetch_related()." % lookup)
+
+ if prefetcher is not None and not is_fetched:
+ obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr)
+ # We need to ensure we don't keep adding lookups from the
+ # same relationships to stop infinite recursion. So, if we
+ # are already on an automatically added lookup, don't add
+ # the new lookups from relationships we've seen already.
+ if not (lookup in auto_lookups and
+ descriptor in followed_descriptors):
+ for f in additional_prl:
+ new_prl = LOOKUP_SEP.join([current_lookup, f])
+ auto_lookups.append(new_prl)
+ done_queries[current_lookup] = obj_list
+ followed_descriptors.add(descriptor)
+ else:
+ # Either a singly related object that has already been fetched
+ # (e.g. via select_related), or hopefully some other property
+ # that doesn't support prefetching but needs to be traversed.
+
+ # We replace the current list of parent objects with the list
+ # of related objects, filtering out empty or missing values so
+ # that we can continue with nullable or reverse relations.
+ new_obj_list = []
+ for obj in obj_list:
+ try:
+ new_obj = getattr(obj, attr)
+ except exceptions.ObjectDoesNotExist:
+ continue
+ if new_obj is None:
+ continue
+ new_obj_list.append(new_obj)
+ obj_list = new_obj_list
+
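+# A sketch of the traversal above for the lookup 'choices__voters'
+# (relations hypothetical): level 0 prefetches 'choices' onto the result
+# cache, records done_queries['choices'], and replaces obj_list with the
+# fetched Choice instances; level 1 then prefetches 'voters' onto that
+# list. Lookups added by default managers land in auto_lookups and
+# re-enter the loop through the itertools.chain above.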
+
+def get_prefetcher(instance, attr):
+ """
+ For the attribute 'attr' on the given instance, finds
+ an object that has a get_prefetch_queryset() method.
+ Returns a 4-tuple containing:
+ (the object with get_prefetch_queryset (or None),
+ the descriptor object representing this relationship (or None),
+ a boolean that is False if the attribute was not found at all,
+ a boolean that is True if the attribute has already been fetched)
+ """
+ prefetcher = None
+ attr_found = False
+ is_fetched = False
+
+ # For singly related objects, we have to avoid getting the attribute
+ # from the object, as this will trigger the query. So we first try
+ # on the class, in order to get the descriptor object.
+ rel_obj_descriptor = getattr(instance.__class__, attr, None)
+ if rel_obj_descriptor is None:
+ try:
+ rel_obj = getattr(instance, attr)
+ attr_found = True
+ except AttributeError:
+ pass
+ else:
+ attr_found = True
+ if rel_obj_descriptor:
+ # singly related object, descriptor object has the
+ # get_prefetch_queryset() method.
+ if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
+ prefetcher = rel_obj_descriptor
+ if rel_obj_descriptor.is_cached(instance):
+ is_fetched = True
+ else:
+ # descriptor doesn't support prefetching, so we go ahead and get
+ # the attribute on the instance rather than the class to
+ # support many related managers
+ rel_obj = getattr(instance, attr)
+ if hasattr(rel_obj, 'get_prefetch_queryset'):
+ prefetcher = rel_obj
+ return prefetcher, rel_obj_descriptor, attr_found, is_fetched
+
+
+def prefetch_one_level(instances, prefetcher, attname):
+ """
+ Helper function for prefetch_related_objects
+
+ Runs prefetches on all instances using the prefetcher object,
+ assigning results to relevant caches in instance.
+
+ The prefetched objects are returned, along with any additional
+ prefetches that must be done due to prefetch_related lookups
+ found from default managers.
+ """
+ # prefetcher must have a method get_prefetch_queryset() which takes a list
+ # of instances, and returns a tuple:
+
+ # (queryset of instances of self.model that are related to passed in instances,
+ # callable that gets value to be matched for returned instances,
+ # callable that gets value to be matched for passed in instances,
+ # boolean that is True for singly related objects,
+ # cache name to assign to).
+
+ # The 'values to be matched' must be hashable as they will be used
+ # in a dictionary.
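+ # For example, a reverse foreign key manager's get_prefetch_queryset()
+ # would return something shaped like this (names hypothetical):
+ #
+ #     (Choice.objects.filter(poll__in=instances),
+ #      lambda choice: choice.poll_id,  # value matched on related objects
+ #      lambda poll: poll.pk,           # value matched on passed instances
+ #      False,                          # many related objects per instance
+ #      'choices')                      # cache name to assign to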
+
+ rel_qs, rel_obj_attr, instance_attr, single, cache_name =\
+ prefetcher.get_prefetch_queryset(instances)
+ # We have to handle the possibility that the default manager itself added
+ # prefetch_related lookups to the QuerySet we just got back. We don't want to
+ # trigger the prefetch_related functionality by evaluating the query.
+ # Rather, we need to merge in the prefetch_related lookups.
+ additional_prl = getattr(rel_qs, '_prefetch_related_lookups', [])
+ if additional_prl:
+ # Don't need to clone because the manager should have given us a fresh
+ # instance, so we access an internal instead of using public interface
+ # for performance reasons.
+ rel_qs._prefetch_related_lookups = []
+
+ all_related_objects = list(rel_qs)
+
+ rel_obj_cache = {}
+ for rel_obj in all_related_objects:
+ rel_attr_val = rel_obj_attr(rel_obj)
+ rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
+
+ for obj in instances:
+ instance_attr_val = instance_attr(obj)
+ vals = rel_obj_cache.get(instance_attr_val, [])
+ if single:
+ # Need to assign to single cache on instance
+ setattr(obj, cache_name, vals[0] if vals else None)
+ else:
+ # Multi, attribute represents a manager with an .all() method that
+ # returns a QuerySet
+ qs = getattr(obj, attname).all()
+ qs._result_cache = vals
+ # We don't want the individual qs doing prefetch_related now, since we
+ # have merged this into the current work.
+ qs._prefetch_done = True
+ obj._prefetched_objects_cache[cache_name] = qs
+ return all_related_objects, additional_prl
diff --git a/lib/python2.7/site-packages/django/db/models/query_utils.py b/lib/python2.7/site-packages/django/db/models/query_utils.py
new file mode 100644
index 0000000..ee7a56a
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/query_utils.py
@@ -0,0 +1,205 @@
+"""
+Various data structures used in query construction.
+
+Factored out from django.db.models.query to avoid making the main module very
+large and/or so that they can be used by other modules without getting into
+circular import difficulties.
+"""
+from __future__ import unicode_literals
+
+from django.db.backends import util
+from django.utils import six
+from django.utils import tree
+
+
+class InvalidQuery(Exception):
+ """
+ The query passed to raw() isn't safe to use with raw().
+ """
+ pass
+
+
+class QueryWrapper(object):
+ """
+ A type that indicates the contents are an SQL fragment and the associated
+ parameters. Can be used to pass opaque data to a where-clause, for example.
+ """
+ def __init__(self, sql, params):
+ self.data = sql, list(params)
+
+ def as_sql(self, qn=None, connection=None):
+ return self.data
+
+class Q(tree.Node):
+ """
+ Encapsulates filters as objects that can then be combined logically (using
+ & and |).
+ """
+ # Connection types
+ AND = 'AND'
+ OR = 'OR'
+ default = AND
+
+ def __init__(self, *args, **kwargs):
+ super(Q, self).__init__(children=list(args) + list(six.iteritems(kwargs)))
+
+ def _combine(self, other, conn):
+ if not isinstance(other, Q):
+ raise TypeError(other)
+ obj = type(self)()
+ obj.connector = conn
+ obj.add(self, conn)
+ obj.add(other, conn)
+ return obj
+
+ def __or__(self, other):
+ return self._combine(other, self.OR)
+
+ def __and__(self, other):
+ return self._combine(other, self.AND)
+
+ def __invert__(self):
+ obj = type(self)()
+ obj.add(self, self.AND)
+ obj.negate()
+ return obj
+
+ def clone(self):
+ clone = self.__class__._new_instance(
+ children=[], connector=self.connector, negated=self.negated)
+ for child in self.children:
+ if hasattr(child, 'clone'):
+ clone.children.append(child.clone())
+ else:
+ clone.children.append(child)
+ return clone
+
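+# Sketch of how these combinators nest (field names hypothetical):
+#
+#     q = Q(name='a') | Q(name='b')  # OR node with two children
+#     q &= Q(age__gte=18)            # AND node wrapping the OR
+#     q = ~q                         # negated node, via __invert__
+#
+# Each operator builds a new tree.Node instead of mutating its operands.
+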
+class DeferredAttribute(object):
+ """
+ A wrapper for a deferred-loading field. When the value is read from this
+ object the first time, the query is executed.
+ """
+ def __init__(self, field_name, model):
+ self.field_name = field_name
+
+ def __get__(self, instance, owner):
+ """
+ Retrieves and caches the value from the datastore on the first lookup.
+ Returns the cached value.
+ """
+ from django.db.models.fields import FieldDoesNotExist
+ non_deferred_model = instance._meta.proxy_for_model
+ opts = non_deferred_model._meta
+
+ assert instance is not None
+ data = instance.__dict__
+ if data.get(self.field_name, self) is self:
+ # self.field_name is the attname of the field, but only() takes the
+ # actual name, so we need to translate it here.
+ try:
+ f = opts.get_field_by_name(self.field_name)[0]
+ except FieldDoesNotExist:
+ f = [f for f in opts.fields
+ if f.attname == self.field_name][0]
+ name = f.name
+ # Let's see if the field is part of the parent chain. If so we
+ # might be able to reuse the already loaded value. Refs #18343.
+ val = self._check_parent_chain(instance, name)
+ if val is None:
+ # We use only() instead of values() here because we want the
+ # various data coercion methods (to_python(), etc.) to be
+ # called here.
+ val = getattr(
+ non_deferred_model._base_manager.only(name).using(
+ instance._state.db).get(pk=instance.pk),
+ self.field_name
+ )
+ data[self.field_name] = val
+ return data[self.field_name]
+
+ def __set__(self, instance, value):
+ """
+ Deferred loading attributes can be set normally (which means there will
+ never be a database lookup involved).
+ """
+ instance.__dict__[self.field_name] = value
+
+ def _check_parent_chain(self, instance, name):
+ """
+ Check if the field value can be fetched from a parent field already
+ loaded in the instance. This can be done if the to-be-fetched
+ field is a primary key field.
+ """
+ opts = instance._meta
+ f = opts.get_field_by_name(name)[0]
+ link_field = opts.get_ancestor_link(f.model)
+ if f.primary_key and f != link_field:
+ return getattr(instance, link_field.attname)
+ return None
+
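+# Sketch of the descriptor in action, for a hypothetical deferred Entry
+# model: after obj = Entry.objects.defer('body').get(pk=1), the class has
+# body = DeferredAttribute('body', Entry); the first obj.body access runs
+# roughly Entry._base_manager.only('body').get(pk=obj.pk) and caches the
+# value in obj.__dict__, so later reads return it without another query.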
+
+def select_related_descend(field, restricted, requested, load_fields, reverse=False):
+ """
+ Returns True if this field should be used to descend deeper for
+ select_related() purposes. Used by both the query construction code
+ (sql.query.fill_related_selections()) and the model instance creation code
+ (query.get_klass_info()).
+
+ Arguments:
+ * field - the field to be checked
+ * restricted - a boolean indicating if the field list has been
+ manually restricted using a "requested" clause
+ * requested - The select_related() dictionary.
+ * load_fields - the set of fields to be loaded on this model
+ * reverse - boolean, True if we are checking a reverse select related
+ """
+ if not field.rel:
+ return False
+ if field.rel.parent_link and not reverse:
+ return False
+ if restricted:
+ if reverse and field.related_query_name() not in requested:
+ return False
+ if not reverse and field.name not in requested:
+ return False
+ if not restricted and field.null:
+ return False
+ if load_fields:
+ if field.name not in load_fields:
+ if restricted and field.name in requested:
+ raise InvalidQuery("Field %s.%s cannot be both deferred"
+ " and traversed using select_related"
+ " at the same time." %
+ (field.model._meta.object_name, field.name))
+ return False
+ return True
+
+# This function is needed because data descriptors must be defined on a class
+# object, not an instance, to have any effect.
+
+def deferred_class_factory(model, attrs):
+ """
+ Returns a class object that is a copy of "model" with the specified "attrs"
+ being replaced with DeferredAttribute objects.
+ """
+ class Meta:
+ proxy = True
+ app_label = model._meta.app_label
+
+ # The app_cache wants a unique name for each model, otherwise the new class
+ # won't be created (we get an old one back). Therefore, we generate the
+ # name using the passed in attrs. It's OK to reuse an existing class
+ # object if the attrs are identical.
+ name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
+ name = util.truncate_name(name, 80, 32)
+
+ overrides = dict((attr, DeferredAttribute(attr, model)) for attr in attrs)
+ overrides["Meta"] = Meta
+ overrides["__module__"] = model.__module__
+ overrides["_deferred"] = True
+ return type(str(name), (model,), overrides)
+
+# The above function is also used to unpickle model instances with deferred
+# fields.
+deferred_class_factory.__safe_for_unpickling__ = True
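+# Sketch of what the factory produces, for a hypothetical Entry model:
+# deferred_class_factory(Entry, {'body'}) returns a proxy subclass named
+# roughly "Entry_Deferred_body" (truncated via truncate_name) whose body
+# attribute is a DeferredAttribute and whose _deferred flag is True.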
diff --git a/lib/python2.7/site-packages/django/db/models/related.py b/lib/python2.7/site-packages/django/db/models/related.py
new file mode 100644
index 0000000..4b00dd3
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/related.py
@@ -0,0 +1,67 @@
+from collections import namedtuple
+
+from django.utils.encoding import smart_text
+from django.db.models.fields import BLANK_CHOICE_DASH
+
+# PathInfo is used when converting lookups (fk__somecol). The contents
+# describe the relation in Model terms (model Options and Fields for both
+# sides of the relation). The join_field is the field backing the relation.
+PathInfo = namedtuple('PathInfo',
+ 'from_opts to_opts target_fields join_field '
+ 'm2m direct')
+
+class RelatedObject(object):
+ def __init__(self, parent_model, model, field):
+ self.parent_model = parent_model
+ self.model = model
+ self.opts = model._meta
+ self.field = field
+ self.name = '%s:%s' % (self.opts.app_label, self.opts.model_name)
+ self.var_name = self.opts.model_name
+
+ def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
+ limit_to_currently_related=False):
+ """Returns choices with a default blank choices included, for use
+ as SelectField choices for this field.
+
+ Analogue of django.db.models.fields.Field.get_choices, provided
+ initially for utilisation by RelatedFieldListFilter.
+ """
+ first_choice = blank_choice if include_blank else []
+ queryset = self.model._default_manager.all()
+ if limit_to_currently_related:
+ queryset = queryset.complex_filter(
+ {'%s__isnull' % self.parent_model._meta.model_name: False})
+ lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
+ return first_choice + lst
+
+ def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
+ # Defer to the actual field definition for db prep
+ return self.field.get_db_prep_lookup(lookup_type, value,
+ connection=connection, prepared=prepared)
+
+ def editable_fields(self):
+ "Get the fields in this class that should be edited inline."
+ return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
+
+ def __repr__(self):
+ return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
+
+ def get_accessor_name(self):
+ # This method encapsulates the logic that decides what name to give an
+ # accessor descriptor that retrieves related many-to-one or
+ # many-to-many objects. It uses the lower-cased object_name + "_set",
+ # but this can be overridden with the "related_name" option.
+ if self.field.rel.multiple:
+ # If this is a symmetrical m2m relation on self, there is no reverse accessor.
+ if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
+ return None
+ return self.field.rel.related_name or (self.opts.model_name + '_set')
+ else:
+ return self.field.rel.related_name or (self.opts.model_name)
+
+ def get_cache_name(self):
+ return "_%s_cache" % self.get_accessor_name()
+
+ def get_path_info(self):
+ return self.field.get_reverse_path_info()
diff --git a/lib/python2.7/site-packages/django/db/models/signals.py b/lib/python2.7/site-packages/django/db/models/signals.py
new file mode 100644
index 0000000..0782442
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/signals.py
@@ -0,0 +1,18 @@
+from django.dispatch import Signal
+
+class_prepared = Signal(providing_args=["class"])
+
+pre_init = Signal(providing_args=["instance", "args", "kwargs"], use_caching=True)
+post_init = Signal(providing_args=["instance"], use_caching=True)
+
+pre_save = Signal(providing_args=["instance", "raw", "using", "update_fields"],
+ use_caching=True)
+post_save = Signal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
+
+pre_delete = Signal(providing_args=["instance", "using"], use_caching=True)
+post_delete = Signal(providing_args=["instance", "using"], use_caching=True)
+
+pre_syncdb = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"])
+post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"])
+
+m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], use_caching=True)
diff --git a/lib/python2.7/site-packages/django/db/models/sql/__init__.py b/lib/python2.7/site-packages/django/db/models/sql/__init__.py
new file mode 100644
index 0000000..df5b74e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/__init__.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from django.db.models.sql.datastructures import EmptyResultSet
+from django.db.models.sql.subqueries import *
+from django.db.models.sql.query import *
+from django.db.models.sql.where import AND, OR
+
+
+__all__ = ['Query', 'AND', 'OR', 'EmptyResultSet']
diff --git a/lib/python2.7/site-packages/django/db/models/sql/aggregates.py b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py
new file mode 100644
index 0000000..2bd2b2f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py
@@ -0,0 +1,125 @@
+"""
+Classes to represent the default SQL aggregate functions
+"""
+import copy
+
+from django.db.models.fields import IntegerField, FloatField
+
+# Fake fields used to identify aggregate types in data-conversion operations.
+ordinal_aggregate_field = IntegerField()
+computed_aggregate_field = FloatField()
+
+class Aggregate(object):
+ """
+ Default SQL Aggregate.
+ """
+ is_ordinal = False
+ is_computed = False
+ sql_template = '%(function)s(%(field)s)'
+
+ def __init__(self, col, source=None, is_summary=False, **extra):
+ """Instantiate an SQL aggregate
+
+ * col is a column reference describing the subject field
+ of the aggregate. It can be an alias, or a tuple describing
+ a table and column name.
+ * source is the underlying field or aggregate definition for
+ the column reference. If the aggregate is not an ordinal or
+ computed type, this reference is used to determine the coerced
+ output type of the aggregate.
+ * extra is a dictionary of additional data to provide for the
+ aggregate definition
+
+ Also utilizes the class variables:
+ * sql_function, the name of the SQL function that implements the
+ aggregate.
+ * sql_template, a template string that is used to render the
+ aggregate into SQL.
+ * is_ordinal, a boolean indicating if the output of this aggregate
+ is an integer (e.g., a count)
+        * is_computed, a boolean indicating if the output of this aggregate
+ is a computed float (e.g., an average), regardless of the input
+ type.
+
+ """
+ self.col = col
+ self.source = source
+ self.is_summary = is_summary
+ self.extra = extra
+
+ # Follow the chain of aggregate sources back until you find an
+ # actual field, or an aggregate that forces a particular output
+        # type. The type of this field will be used to coerce values
+ # retrieved from the database.
+ tmp = self
+
+ while tmp and isinstance(tmp, Aggregate):
+ if getattr(tmp, 'is_ordinal', False):
+ tmp = ordinal_aggregate_field
+ elif getattr(tmp, 'is_computed', False):
+ tmp = computed_aggregate_field
+ else:
+ tmp = tmp.source
+
+ self.field = tmp
+
+ def relabeled_clone(self, change_map):
+ clone = copy.copy(self)
+ if isinstance(self.col, (list, tuple)):
+ clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
+ return clone
+
+ def as_sql(self, qn, connection):
+ "Return the aggregate, rendered as SQL with parameters."
+ params = []
+
+ if hasattr(self.col, 'as_sql'):
+ field_name, params = self.col.as_sql(qn, connection)
+ elif isinstance(self.col, (list, tuple)):
+ field_name = '.'.join([qn(c) for c in self.col])
+ else:
+ field_name = self.col
+
+ substitutions = {
+ 'function': self.sql_function,
+ 'field': field_name
+ }
+ substitutions.update(self.extra)
+
+ return self.sql_template % substitutions, params
+
+
+class Avg(Aggregate):
+ is_computed = True
+ sql_function = 'AVG'
+
+class Count(Aggregate):
+ is_ordinal = True
+ sql_function = 'COUNT'
+ sql_template = '%(function)s(%(distinct)s%(field)s)'
+
+ def __init__(self, col, distinct=False, **extra):
+ super(Count, self).__init__(col, distinct='DISTINCT ' if distinct else '', **extra)
+
+class Max(Aggregate):
+ sql_function = 'MAX'
+
+class Min(Aggregate):
+ sql_function = 'MIN'
+
+class StdDev(Aggregate):
+ is_computed = True
+
+ def __init__(self, col, sample=False, **extra):
+ super(StdDev, self).__init__(col, **extra)
+ self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
+
+class Sum(Aggregate):
+ sql_function = 'SUM'
+
+class Variance(Aggregate):
+ is_computed = True
+
+ def __init__(self, col, sample=False, **extra):
+ super(Variance, self).__init__(col, **extra)
+ self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
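+
+# A rendering sketch (hypothetical quoting function): for tuple columns only
+# qn is consulted, so connection can be None here:
+#
+#     qn = lambda name: '"%s"' % name
+#     sql, params = Count(('books', 'id'), distinct=True).as_sql(qn, None)
+#     # sql == 'COUNT(DISTINCT "books"."id")', params == []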
diff --git a/lib/python2.7/site-packages/django/db/models/sql/compiler.py b/lib/python2.7/site-packages/django/db/models/sql/compiler.py
new file mode 100644
index 0000000..ea7f9f4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/compiler.py
@@ -0,0 +1,1128 @@
+import datetime
+
+from django.conf import settings
+from django.core.exceptions import FieldError
+from django.db.backends.util import truncate_name
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.query_utils import select_related_descend, QueryWrapper
+from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
+ GET_ITERATOR_CHUNK_SIZE, SelectInfo)
+from django.db.models.sql.datastructures import EmptyResultSet
+from django.db.models.sql.expressions import SQLEvaluator
+from django.db.models.sql.query import get_order_dir, Query
+from django.db.transaction import TransactionManagementError
+from django.db.utils import DatabaseError
+from django.utils import six
+from django.utils.six.moves import zip
+from django.utils import timezone
+
+
+class SQLCompiler(object):
+ def __init__(self, query, connection, using):
+ self.query = query
+ self.connection = connection
+ self.using = using
+ self.quote_cache = {}
+ # When ordering a queryset with distinct on a column not part of the
+ # select set, the ordering column needs to be added to the select
+ # clause. This information is needed both in SQL construction and
+ # masking away the ordering selects from the returned row.
+ self.ordering_aliases = []
+ self.ordering_params = []
+
+ def pre_sql_setup(self):
+ """
+ Does any necessary class setup immediately prior to producing SQL. This
+ is for things that can't necessarily be done in __init__ because we
+ might not have all the pieces in place at that time.
+ # TODO: after the query has been executed, the altered state should be
+ # cleaned. We are not using a clone() of the query here.
+ """
+ if not self.query.tables:
+ self.query.join((None, self.query.get_meta().db_table, None))
+ if (not self.query.select and self.query.default_cols and not
+ self.query.included_inherited_models):
+ self.query.setup_inherited_models()
+ if self.query.select_related and not self.query.related_select_cols:
+ self.fill_related_selections()
+
+ def quote_name_unless_alias(self, name):
+ """
+ A wrapper around connection.ops.quote_name that doesn't quote aliases
+ for table names. This avoids problems with some SQL dialects that treat
+ quoted strings specially (e.g. PostgreSQL).
+ """
+ if name in self.quote_cache:
+ return self.quote_cache[name]
+ if ((name in self.query.alias_map and name not in self.query.table_map) or
+ name in self.query.extra_select):
+ self.quote_cache[name] = name
+ return name
+ r = self.connection.ops.quote_name(name)
+ self.quote_cache[name] = r
+ return r
+
+ def as_sql(self, with_limits=True, with_col_aliases=False):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+
+ If 'with_limits' is False, any limit/offset information is not included
+ in the query.
+ """
+ if with_limits and self.query.low_mark == self.query.high_mark:
+ return '', ()
+
+ self.pre_sql_setup()
+ # After executing the query, we must get rid of any joins the query
+        # setup created. So, take note of alias counts before running the
+        # query. However, we do not want to get rid of things done in
+        # pre_sql_setup(), as pre_sql_setup() will modify query state in a
+        # way that forbids another run of it.
+ self.refcounts_before = self.query.alias_refcount.copy()
+ out_cols, s_params = self.get_columns(with_col_aliases)
+ ordering, o_params, ordering_group_by = self.get_ordering()
+
+ distinct_fields = self.get_distinct()
+
+ # This must come after 'select', 'ordering' and 'distinct' -- see
+ # docstring of get_from_clause() for details.
+ from_, f_params = self.get_from_clause()
+
+ qn = self.quote_name_unless_alias
+
+ where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
+ having_group_by = self.query.having.get_cols()
+ params = []
+ for val in six.itervalues(self.query.extra_select):
+ params.extend(val[1])
+
+ result = ['SELECT']
+
+ if self.query.distinct:
+ result.append(self.connection.ops.distinct_sql(distinct_fields))
+ params.extend(o_params)
+ result.append(', '.join(out_cols + self.ordering_aliases))
+ params.extend(s_params)
+ params.extend(self.ordering_params)
+
+ result.append('FROM')
+ result.extend(from_)
+ params.extend(f_params)
+
+ if where:
+ result.append('WHERE %s' % where)
+ params.extend(w_params)
+
+ grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
+ if grouping:
+ if distinct_fields:
+ raise NotImplementedError(
+ "annotate() + distinct(fields) not implemented.")
+ if not ordering:
+ ordering = self.connection.ops.force_no_ordering()
+ result.append('GROUP BY %s' % ', '.join(grouping))
+ params.extend(gb_params)
+
+ if having:
+ result.append('HAVING %s' % having)
+ params.extend(h_params)
+
+ if ordering:
+ result.append('ORDER BY %s' % ', '.join(ordering))
+
+ if with_limits:
+ if self.query.high_mark is not None:
+ result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
+ if self.query.low_mark:
+ if self.query.high_mark is None:
+ val = self.connection.ops.no_limit_value()
+ if val:
+ result.append('LIMIT %d' % val)
+ result.append('OFFSET %d' % self.query.low_mark)
+
+ if self.query.select_for_update and self.connection.features.has_select_for_update:
+ if self.connection.get_autocommit():
+ raise TransactionManagementError("select_for_update cannot be used outside of a transaction.")
+
+ # If we've been asked for a NOWAIT query but the backend does not support it,
+ # raise a DatabaseError otherwise we could get an unexpected deadlock.
+ nowait = self.query.select_for_update_nowait
+ if nowait and not self.connection.features.has_select_for_update_nowait:
+ raise DatabaseError('NOWAIT is not supported on this database backend.')
+ result.append(self.connection.ops.for_update_sql(nowait=nowait))
+
+ # Finally do cleanup - get rid of the joins we created above.
+ self.query.reset_refcounts(self.refcounts_before)
+
+ return ' '.join(result), tuple(params)
+
+ def as_nested_sql(self):
+ """
+ Perform the same functionality as the as_sql() method, returning an
+ SQL string and parameters. However, the alias prefixes are bumped
+ beforehand (in a copy -- the current query isn't changed), and any
+ ordering is removed if the query is unsliced.
+
+ Used when nesting this query inside another.
+ """
+ obj = self.query.clone()
+ if obj.low_mark == 0 and obj.high_mark is None:
+ # If there is no slicing in use, then we can safely drop all ordering
+ obj.clear_ordering(True)
+ obj.bump_prefix()
+ return obj.get_compiler(connection=self.connection).as_sql()
+
+ def get_columns(self, with_aliases=False):
+ """
+ Returns the list of columns to use in the select statement, as well as
+        a list of any extra parameters that need to be included. If no columns
+ have been specified, returns all columns relating to fields in the
+ model.
+
+ If 'with_aliases' is true, any column names that are duplicated
+ (without the table names) are given unique aliases. This is needed in
+ some cases to avoid ambiguity with nested queries.
+ """
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
+ params = []
+ aliases = set(self.query.extra_select.keys())
+ if with_aliases:
+ col_aliases = aliases.copy()
+ else:
+ col_aliases = set()
+ if self.query.select:
+ only_load = self.deferred_to_columns()
+ for col, _ in self.query.select:
+ if isinstance(col, (list, tuple)):
+ alias, column = col
+ table = self.query.alias_map[alias].table_name
+ if table in only_load and column not in only_load[table]:
+ continue
+ r = '%s.%s' % (qn(alias), qn(column))
+ if with_aliases:
+ if col[1] in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s AS %s' % (r, c_alias))
+ aliases.add(c_alias)
+ col_aliases.add(c_alias)
+ else:
+ result.append('%s AS %s' % (r, qn2(col[1])))
+ aliases.add(r)
+ col_aliases.add(col[1])
+ else:
+ result.append(r)
+ aliases.add(r)
+ col_aliases.add(col[1])
+ else:
+ col_sql, col_params = col.as_sql(qn, self.connection)
+ result.append(col_sql)
+ params.extend(col_params)
+
+ if hasattr(col, 'alias'):
+ aliases.add(col.alias)
+ col_aliases.add(col.alias)
+
+ elif self.query.default_cols:
+ cols, new_aliases = self.get_default_columns(with_aliases,
+ col_aliases)
+ result.extend(cols)
+ aliases.update(new_aliases)
+
+ max_name_length = self.connection.ops.max_name_length()
+ for alias, aggregate in self.query.aggregate_select.items():
+ agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
+ if alias is None:
+ result.append(agg_sql)
+ else:
+ result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
+ params.extend(agg_params)
+
+ for (table, col), _ in self.query.related_select_cols:
+ r = '%s.%s' % (qn(table), qn(col))
+ if with_aliases and col in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s AS %s' % (r, c_alias))
+ aliases.add(c_alias)
+ col_aliases.add(c_alias)
+ else:
+ result.append(r)
+ aliases.add(r)
+ col_aliases.add(col)
+
+ self._select_aliases = aliases
+ return result, params
+
+ def get_default_columns(self, with_aliases=False, col_aliases=None,
+ start_alias=None, opts=None, as_pairs=False, from_parent=None):
+ """
+ Computes the default columns for selecting every field in the base
+ model. Will sometimes be called to pull in related models (e.g. via
+ select_related), in which case "opts" and "start_alias" will be given
+ to provide a starting point for the traversal.
+
+ Returns a list of strings, quoted appropriately for use in SQL
+ directly, as well as a set of aliases used in the select statement (if
+ 'as_pairs' is True, returns a list of (alias, col_name) pairs instead
+ of strings as the first component and None as the second component).
+ """
+ result = []
+ if opts is None:
+ opts = self.query.get_meta()
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ aliases = set()
+ only_load = self.deferred_to_columns()
+ if not start_alias:
+ start_alias = self.query.get_initial_alias()
+ # The 'seen_models' is used to optimize checking the needed parent
+ # alias for a given field. This also includes None -> start_alias to
+ # be used by local fields.
+ seen_models = {None: start_alias}
+
+ for field, model in opts.get_concrete_fields_with_model():
+ if from_parent and model is not None and issubclass(from_parent, model):
+ # Avoid loading data for already loaded parents.
+ continue
+ alias = self.query.join_parent_model(opts, model, start_alias,
+ seen_models)
+ table = self.query.alias_map[alias].table_name
+ if table in only_load and field.column not in only_load[table]:
+ continue
+ if as_pairs:
+ result.append((alias, field))
+ aliases.add(alias)
+ continue
+ if with_aliases and field.column in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s.%s AS %s' % (qn(alias),
+ qn2(field.column), c_alias))
+ col_aliases.add(c_alias)
+ aliases.add(c_alias)
+ else:
+ r = '%s.%s' % (qn(alias), qn2(field.column))
+ result.append(r)
+ aliases.add(r)
+ if with_aliases:
+ col_aliases.add(field.column)
+ return result, aliases
+
+ def get_distinct(self):
+ """
+        Returns a quoted list of fields to use in the DISTINCT ON part of the query.
+
+ Note that this method can alter the tables in the query, and thus it
+ must be called before get_from_clause().
+ """
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ result = []
+ opts = self.query.get_meta()
+
+ for name in self.query.distinct_fields:
+ parts = name.split(LOOKUP_SEP)
+ field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
+ cols, alias = self._final_join_removal(cols, alias)
+ for col in cols:
+ result.append("%s.%s" % (qn(alias), qn2(col)))
+ return result
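+
+    # Illustration (PostgreSQL is the only core backend with DISTINCT ON;
+    # hypothetical table/column names): qs.distinct('author__name') makes
+    # get_distinct() return ['"app_author"."name"'], which as_sql() renders
+    # via connection.ops.distinct_sql() into
+    #     SELECT DISTINCT ON ("app_author"."name") ...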
+
+ def get_ordering(self):
+ """
+ Returns a tuple containing a list representing the SQL elements in the
+ "order by" clause, and the list of SQL elements that need to be added
+ to the GROUP BY clause as a result of the ordering.
+
+ Also sets the ordering_aliases attribute on this instance to a list of
+ extra aliases needed in the select.
+
+ Determining the ordering SQL can change the tables we need to include,
+ so this should be run *before* get_from_clause().
+ """
+ if self.query.extra_order_by:
+ ordering = self.query.extra_order_by
+ elif not self.query.default_ordering:
+ ordering = self.query.order_by
+ else:
+ ordering = (self.query.order_by
+ or self.query.get_meta().ordering
+ or [])
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ distinct = self.query.distinct
+ select_aliases = self._select_aliases
+ result = []
+ group_by = []
+ ordering_aliases = []
+ if self.query.standard_ordering:
+ asc, desc = ORDER_DIR['ASC']
+ else:
+ asc, desc = ORDER_DIR['DESC']
+
+ # It's possible, due to model inheritance, that normal usage might try
+ # to include the same field more than once in the ordering. We track
+ # the table/column pairs we use and discard any after the first use.
+ processed_pairs = set()
+
+ params = []
+ ordering_params = []
+ for pos, field in enumerate(ordering):
+ if field == '?':
+ result.append(self.connection.ops.random_function_sql())
+ continue
+ if isinstance(field, int):
+ if field < 0:
+ order = desc
+ field = -field
+ else:
+ order = asc
+ result.append('%s %s' % (field, order))
+ group_by.append((str(field), []))
+ continue
+ col, order = get_order_dir(field, asc)
+ if col in self.query.aggregate_select:
+ result.append('%s %s' % (qn(col), order))
+ continue
+ if '.' in field:
+ # This came in through an extra(order_by=...) addition. Pass it
+ # on verbatim.
+ table, col = col.split('.', 1)
+ if (table, col) not in processed_pairs:
+ elt = '%s.%s' % (qn(table), col)
+ processed_pairs.add((table, col))
+ if not distinct or elt in select_aliases:
+ result.append('%s %s' % (elt, order))
+ group_by.append((elt, []))
+ elif get_order_dir(field)[0] not in self.query.extra:
+ # 'col' is of the form 'field' or 'field1__field2' or
+ # '-field1__field2__field', etc.
+ for table, cols, order in self.find_ordering_name(field,
+ self.query.get_meta(), default_order=asc):
+ for col in cols:
+ if (table, col) not in processed_pairs:
+ elt = '%s.%s' % (qn(table), qn2(col))
+ processed_pairs.add((table, col))
+ if distinct and elt not in select_aliases:
+ ordering_aliases.append(elt)
+ result.append('%s %s' % (elt, order))
+ group_by.append((elt, []))
+ else:
+ elt = qn2(col)
+ if col not in self.query.extra_select:
+ sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
+ ordering_aliases.append(sql)
+ ordering_params.extend(self.query.extra[col][1])
+ else:
+ if distinct and col not in select_aliases:
+ ordering_aliases.append(elt)
+ ordering_params.extend(params)
+ result.append('%s %s' % (elt, order))
+ group_by.append(self.query.extra[col])
+ self.ordering_aliases = ordering_aliases
+ self.ordering_params = ordering_params
+ return result, params, group_by
+
+ def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
+ already_seen=None):
+ """
+ Returns the table alias (the name might be ambiguous, the alias will
+ not be) and column name for ordering by the given 'name' parameter.
+ The 'name' is of the form 'field1__field2__...__fieldN'.
+ """
+ name, order = get_order_dir(name, default_order)
+ pieces = name.split(LOOKUP_SEP)
+ field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
+
+ # If we get to this point and the field is a relation to another model,
+ # append the default ordering for that model.
+ if field.rel and len(joins) > 1 and opts.ordering:
+ # Firstly, avoid infinite loops.
+ if not already_seen:
+ already_seen = set()
+ join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
+ if join_tuple in already_seen:
+ raise FieldError('Infinite loop caused by ordering.')
+ already_seen.add(join_tuple)
+
+ results = []
+ for item in opts.ordering:
+ results.extend(self.find_ordering_name(item, opts, alias,
+ order, already_seen))
+ return results
+ cols, alias = self._final_join_removal(cols, alias)
+ return [(alias, cols, order)]
+
+ def _setup_joins(self, pieces, opts, alias):
+ """
+ A helper method for get_ordering and get_distinct. This method will
+ call query.setup_joins, handle refcounts and then promote the joins.
+
+        Note that get_ordering and get_distinct must produce the same target
+        columns for the same input, as the prefixes of get_ordering and
+        get_distinct must match. Executing SQL where this is not true is an
+        error.
+ """
+ if not alias:
+ alias = self.query.get_initial_alias()
+ field, targets, opts, joins, _ = self.query.setup_joins(
+ pieces, opts, alias)
+ # We will later on need to promote those joins that were added to the
+ # query afresh above.
+ joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
+ alias = joins[-1]
+ cols = [target.column for target in targets]
+ if not field.rel:
+ # To avoid inadvertent trimming of a necessary alias, use the
+ # refcount to show that we are referencing a non-relation field on
+ # the model.
+ self.query.ref_alias(alias)
+
+ # Must use left outer joins for nullable fields and their relations.
+ # Ordering or distinct must not affect the returned set, and INNER
+ # JOINS for nullable fields could do this.
+ self.query.promote_joins(joins_to_promote)
+ return field, cols, alias, joins, opts
+
+ def _final_join_removal(self, cols, alias):
+ """
+ A helper method for get_distinct and get_ordering. This method will
+ trim extra not-needed joins from the tail of the join chain.
+
+ This is very similar to what is done in trim_joins, but we will
+ trim LEFT JOINS here. It would be a good idea to consolidate this
+ method and query.trim_joins().
+ """
+ if alias:
+ while 1:
+ join = self.query.alias_map[alias]
+ lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
+ if set(cols) != set(rhs_cols):
+ break
+
+ cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
+ self.query.unref_alias(alias)
+ alias = join.lhs_alias
+ return cols, alias
+
+ def get_from_clause(self):
+ """
+ Returns a list of strings that are joined together to go after the
+ "FROM" part of the query, as well as a list any extra parameters that
+ need to be included. Sub-classes, can override this to create a
+ from-clause via a "select".
+
+ This should only be called after any SQL construction methods that
+ might change the tables we need. This means the select columns,
+ ordering and distinct must be done first.
+ """
+ result = []
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ first = True
+ from_params = []
+ for alias in self.query.tables:
+ if not self.query.alias_refcount[alias]:
+ continue
+ try:
+ name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
+ except KeyError:
+ # Extra tables can end up in self.tables, but not in the
+ # alias_map if they aren't in a join. That's OK. We skip them.
+ continue
+ alias_str = '' if alias == name else (' %s' % alias)
+ if join_type and not first:
+ extra_cond = join_field.get_extra_restriction(
+ self.query.where_class, alias, lhs)
+ if extra_cond:
+ extra_sql, extra_params = extra_cond.as_sql(
+ qn, self.connection)
+ extra_sql = 'AND (%s)' % extra_sql
+ from_params.extend(extra_params)
+ else:
+ extra_sql = ""
+ result.append('%s %s%s ON ('
+ % (join_type, qn(name), alias_str))
+ for index, (lhs_col, rhs_col) in enumerate(join_cols):
+ if index != 0:
+ result.append(' AND ')
+ result.append('%s.%s = %s.%s' %
+ (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
+ result.append('%s)' % extra_sql)
+ else:
+ connector = '' if first else ', '
+ result.append('%s%s%s' % (connector, qn(name), alias_str))
+ first = False
+ for t in self.query.extra_tables:
+ alias, unused = self.query.table_alias(t)
+ # Only add the alias if it's not already present (the table_alias()
+            # call increments the refcount, so an alias refcount of one means
+            # this is the only reference).
+ if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
+ connector = '' if first else ', '
+ result.append('%s%s' % (connector, qn(alias)))
+ first = False
+ return result, from_params
+
+ def get_grouping(self, having_group_by, ordering_group_by):
+ """
+ Returns a tuple representing the SQL elements in the "group by" clause.
+ """
+ qn = self.quote_name_unless_alias
+ result, params = [], []
+ if self.query.group_by is not None:
+ select_cols = self.query.select + self.query.related_select_cols
+ # Just the column, not the fields.
+ select_cols = [s[0] for s in select_cols]
+ if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
+ and self.connection.features.allows_group_by_pk):
+ self.query.group_by = [
+ (self.query.get_meta().db_table, self.query.get_meta().pk.column)
+ ]
+ select_cols = []
+ seen = set()
+ cols = self.query.group_by + having_group_by + select_cols
+ for col in cols:
+ col_params = ()
+ if isinstance(col, (list, tuple)):
+ sql = '%s.%s' % (qn(col[0]), qn(col[1]))
+ elif hasattr(col, 'as_sql'):
+ sql, col_params = col.as_sql(qn, self.connection)
+ else:
+ sql = '(%s)' % str(col)
+ if sql not in seen:
+ result.append(sql)
+ params.extend(col_params)
+ seen.add(sql)
+
+        # Still, we need to add everything in the ordering (unless the
+        # backend can group by just the PK).
+ if ordering_group_by and not self.connection.features.allows_group_by_pk:
+ for order, order_params in ordering_group_by:
+ # Even if we have seen the same SQL string, it might have
+                # different params, so we add the same SQL in the "has params" case.
+ if order not in seen or order_params:
+ result.append(order)
+ params.extend(order_params)
+ seen.add(order)
+
+ # Unconditionally add the extra_select items.
+ for extra_select, extra_params in self.query.extra_select.values():
+ sql = '(%s)' % str(extra_select)
+ result.append(sql)
+ params.extend(extra_params)
+
+ return result, params
+
+ def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
+ requested=None, restricted=None, nullable=None):
+ """
+ Fill in the information needed for a select_related query. The current
+ depth is measured as the number of connections away from the root model
+ (for example, cur_depth=1 means we are looking at models with direct
+ connections to the root model).
+ """
+ if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
+ # We've recursed far enough; bail out.
+ return
+
+ if not opts:
+ opts = self.query.get_meta()
+ root_alias = self.query.get_initial_alias()
+ self.query.related_select_cols = []
+ only_load = self.query.get_loaded_field_names()
+
+ # Setup for the case when only particular related fields should be
+ # included in the related selection.
+ if requested is None:
+ if isinstance(self.query.select_related, dict):
+ requested = self.query.select_related
+ restricted = True
+ else:
+ restricted = False
+
+ for f, model in opts.get_fields_with_model():
+            # get_fields_with_model() returns None for fields that are
+            # defined on the local model. For those fields we want to use
+            # f.model, i.e. the field's local model.
+ field_model = model or f.model
+ if not select_related_descend(f, restricted, requested,
+ only_load.get(field_model)):
+ continue
+ promote = nullable or f.null
+ _, _, _, joins, _ = self.query.setup_joins(
+ [f.name], opts, root_alias, outer_if_first=promote)
+ alias = joins[-1]
+ columns, _ = self.get_default_columns(start_alias=alias,
+ opts=f.rel.to._meta, as_pairs=True)
+ self.query.related_select_cols.extend(
+ SelectInfo((col[0], col[1].column), col[1]) for col in columns)
+ if restricted:
+ next = requested.get(f.name, {})
+ else:
+ next = False
+ new_nullable = f.null or promote
+ self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
+ next, restricted, new_nullable)
+
+ if restricted:
+ related_fields = [
+ (o.field, o.model)
+ for o in opts.get_all_related_objects()
+ if o.field.unique
+ ]
+ for f, model in related_fields:
+ if not select_related_descend(f, restricted, requested,
+ only_load.get(model), reverse=True):
+ continue
+
+ _, _, _, joins, _ = self.query.setup_joins(
+ [f.related_query_name()], opts, root_alias, outer_if_first=True)
+ alias = joins[-1]
+ from_parent = (opts.model if issubclass(model, opts.model)
+ else None)
+ columns, _ = self.get_default_columns(start_alias=alias,
+ opts=model._meta, as_pairs=True, from_parent=from_parent)
+ self.query.related_select_cols.extend(
+ SelectInfo((col[0], col[1].column), col[1]) for col in columns)
+ next = requested.get(f.related_query_name(), {})
+ # Use True here because we are looking at the _reverse_ side of
+ # the relation, which is always nullable.
+ new_nullable = True
+ self.fill_related_selections(model._meta, alias, cur_depth + 1,
+ next, restricted, new_nullable)
+
+ def deferred_to_columns(self):
+ """
+ Converts the self.deferred_loading data structure to mapping of table
+ names to sets of column names which are to be loaded. Returns the
+ dictionary.
+ """
+ columns = {}
+ self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
+ return columns
+
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ fields = None
+ has_aggregate_select = bool(self.query.aggregate_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ if has_aggregate_select:
+ loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
+ aggregate_start = len(self.query.extra_select) + len(loaded_fields)
+ aggregate_end = aggregate_start + len(self.query.aggregate_select)
+ if resolve_columns:
+ if fields is None:
+ # We only set this up here because
+ # related_select_cols isn't populated until
+ # execute_sql() has been called.
+
+ # We also include types of fields of related models that
+ # will be included via select_related() for the benefit
+ # of MySQL/MySQLdb when boolean fields are involved
+ # (#15040).
+
+ # This code duplicates the logic for the order of fields
+ # found in get_columns(). It would be nice to clean this up.
+ if self.query.select:
+ fields = [f.field for f in self.query.select]
+ elif self.query.default_cols:
+ fields = self.query.get_meta().concrete_fields
+ else:
+ fields = []
+ fields = fields + [f.field for f in self.query.related_select_cols]
+
+ # If the field was deferred, exclude it from being passed
+ # into `resolve_columns` because it wasn't selected.
+ only_load = self.deferred_to_columns()
+ if only_load:
+ fields = [f for f in fields if f.model._meta.db_table not in only_load or
+ f.column in only_load[f.model._meta.db_table]]
+ if has_aggregate_select:
+                        # Pad 'fields' with None entries for the aggregate columns.
+ fields = fields[:aggregate_start] + [
+ None for x in range(0, aggregate_end - aggregate_start)
+ ] + fields[aggregate_start:]
+ row = self.resolve_columns(row, fields)
+
+ if has_aggregate_select:
+ row = tuple(row[:aggregate_start]) + tuple([
+ self.query.resolve_aggregate(value, aggregate, self.connection)
+ for (alias, aggregate), value
+ in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
+ ]) + tuple(row[aggregate_end:])
+
+ yield row
+
+ def execute_sql(self, result_type=MULTI):
+ """
+        Run the query against the database and return the result(s). The
+ return value is a single data item if result_type is SINGLE, or an
+ iterator over the results if the result_type is MULTI.
+
+ result_type is either MULTI (use fetchmany() to retrieve all rows),
+ SINGLE (only retrieve a single row), or None. In this last case, the
+        cursor is returned if any query is executed (since it's used by
+        subclasses such as InsertQuery). It's possible, however, that no query
+ is needed, as the filters describe an empty set. In that case, None is
+ returned, to avoid any unnecessary database interaction.
+ """
+ try:
+ sql, params = self.as_sql()
+ if not sql:
+ raise EmptyResultSet
+ except EmptyResultSet:
+ if result_type == MULTI:
+ return iter([])
+ else:
+ return
+
+ cursor = self.connection.cursor()
+ cursor.execute(sql, params)
+
+ if not result_type:
+ return cursor
+ if result_type == SINGLE:
+ if self.ordering_aliases:
+ return cursor.fetchone()[:-len(self.ordering_aliases)]
+ return cursor.fetchone()
+
+ # The MULTI case.
+ if self.ordering_aliases:
+ result = order_modified_iter(cursor, len(self.ordering_aliases),
+ self.connection.features.empty_fetchmany_value)
+ else:
+ result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
+ self.connection.features.empty_fetchmany_value)
+ if not self.connection.features.can_use_chunked_reads:
+ # If we are using non-chunked reads, we return the same data
+ # structure as normally, but ensure it is all read into memory
+ # before going any further.
+ return list(result)
+ return result
+
+ def as_subquery_condition(self, alias, columns, qn):
+ qn2 = self.connection.ops.quote_name
+ if len(columns) == 1:
+ sql, params = self.as_sql()
+ return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
+
+ for index, select_col in enumerate(self.query.select):
+ lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
+ rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
+ self.query.where.add(
+ QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
+
+ sql, params = self.as_sql()
+ return 'EXISTS (%s)' % sql, params
+
+
+class SQLInsertCompiler(SQLCompiler):
+
+ def __init__(self, *args, **kwargs):
+ self.return_id = False
+ super(SQLInsertCompiler, self).__init__(*args, **kwargs)
+
+ def placeholder(self, field, val):
+ if field is None:
+ # A field value of None means the value is raw.
+ return val
+ elif hasattr(field, 'get_placeholder'):
+ # Some fields (e.g. geo fields) need special munging before
+ # they can be inserted.
+ return field.get_placeholder(val, self.connection)
+ else:
+ # Return the common case for the placeholder
+ return '%s'
+
+ def as_sql(self):
+ # We don't need quote_name_unless_alias() here, since these are all
+ # going to be column names (so we can avoid the extra overhead).
+ qn = self.connection.ops.quote_name
+ opts = self.query.get_meta()
+ result = ['INSERT INTO %s' % qn(opts.db_table)]
+
+ has_fields = bool(self.query.fields)
+ fields = self.query.fields if has_fields else [opts.pk]
+ result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
+
+ if has_fields:
+ params = values = [
+ [
+ f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
+ for f in fields
+ ]
+ for obj in self.query.objs
+ ]
+ else:
+ values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
+ params = [[]]
+ fields = [None]
+ can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
+ not self.return_id and self.connection.features.has_bulk_insert)
+
+ if can_bulk:
+ placeholders = [["%s"] * len(fields)]
+ else:
+ placeholders = [
+ [self.placeholder(field, v) for field, v in zip(fields, val)]
+ for val in values
+ ]
+ # Oracle Spatial needs to remove some values due to #10888
+ params = self.connection.ops.modify_insert_params(placeholders, params)
+ if self.return_id and self.connection.features.can_return_id_from_insert:
+ params = params[0]
+ col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
+ result.append("VALUES (%s)" % ", ".join(placeholders[0]))
+ r_fmt, r_params = self.connection.ops.return_insert_id()
+ # Skip empty r_fmt to allow subclasses to customize behaviour for
+ # 3rd party backends. Refs #19096.
+ if r_fmt:
+ result.append(r_fmt % col)
+ params += r_params
+ return [(" ".join(result), tuple(params))]
+ if can_bulk:
+ result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
+ return [(" ".join(result), tuple([v for val in values for v in val]))]
+ else:
+ return [
+ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
+ for p, vals in zip(placeholders, params)
+ ]
+
+ def execute_sql(self, return_id=False):
+ assert not (return_id and len(self.query.objs) != 1)
+ self.return_id = return_id
+ cursor = self.connection.cursor()
+ for sql, params in self.as_sql():
+ cursor.execute(sql, params)
+ if not (return_id and cursor):
+ return
+ if self.connection.features.can_return_id_from_insert:
+ return self.connection.ops.fetch_returned_insert_id(cursor)
+ return self.connection.ops.last_insert_id(cursor,
+ self.query.get_meta().db_table, self.query.get_meta().pk.column)
+
+
+class SQLDeleteCompiler(SQLCompiler):
+ def as_sql(self):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ assert len(self.query.tables) == 1, \
+ "Can only delete from one table at a time."
+ qn = self.quote_name_unless_alias
+ result = ['DELETE FROM %s' % qn(self.query.tables[0])]
+ where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ if where:
+ result.append('WHERE %s' % where)
+ return ' '.join(result), tuple(params)
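+
+    # A shape sketch (hypothetical table and filter): deleting a single
+    # object typically compiles to
+    #     ('DELETE FROM "app_book" WHERE "app_book"."id" = %s', (1,))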
+
+class SQLUpdateCompiler(SQLCompiler):
+ def as_sql(self):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ self.pre_sql_setup()
+ if not self.query.values:
+ return '', ()
+ table = self.query.tables[0]
+ qn = self.quote_name_unless_alias
+ result = ['UPDATE %s' % qn(table)]
+ result.append('SET')
+ values, update_params = [], []
+ for field, model, val in self.query.values:
+ if hasattr(val, 'prepare_database_save'):
+ val = val.prepare_database_save(field)
+ else:
+ val = field.get_db_prep_save(val, connection=self.connection)
+
+ # Getting the placeholder for the field.
+ if hasattr(field, 'get_placeholder'):
+ placeholder = field.get_placeholder(val, self.connection)
+ else:
+ placeholder = '%s'
+
+ if hasattr(val, 'evaluate'):
+ val = SQLEvaluator(val, self.query, allow_joins=False)
+ name = field.column
+ if hasattr(val, 'as_sql'):
+ sql, params = val.as_sql(qn, self.connection)
+ values.append('%s = %s' % (qn(name), sql))
+ update_params.extend(params)
+ elif val is not None:
+ values.append('%s = %s' % (qn(name), placeholder))
+ update_params.append(val)
+ else:
+ values.append('%s = NULL' % qn(name))
+ if not values:
+ return '', ()
+ result.append(', '.join(values))
+ where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ if where:
+ result.append('WHERE %s' % where)
+ return ' '.join(result), tuple(update_params + params)
+
+ def execute_sql(self, result_type):
+ """
+ Execute the specified update. Returns the number of rows affected by
+ the primary update query. The "primary update query" is the first
+ non-empty query that is executed. Row counts for any subsequent,
+ related queries are not available.
+ """
+ cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
+ rows = cursor.rowcount if cursor else 0
+ is_empty = cursor is None
+ del cursor
+ for query in self.query.get_related_updates():
+ aux_rows = query.get_compiler(self.using).execute_sql(result_type)
+ if is_empty:
+ rows = aux_rows
+ is_empty = False
+ return rows
+
+ def pre_sql_setup(self):
+ """
+ If the update depends on results from other tables, we need to do some
+ munging of the "where" conditions to match the format required for
+ (portable) SQL updates. That is done here.
+
+ Further, if we are going to be running multiple updates, we pull out
+ the id values to update at this point so that they don't change as a
+ result of the progressive updates.
+ """
+ self.query.select_related = False
+ self.query.clear_ordering(True)
+ super(SQLUpdateCompiler, self).pre_sql_setup()
+ count = self.query.count_active_tables()
+ if not self.query.related_updates and count == 1:
+ return
+
+ # We need to use a sub-select in the where clause to filter on things
+ # from other tables.
+ query = self.query.clone(klass=Query)
+ query.bump_prefix()
+ query.extra = {}
+ query.select = []
+ query.add_fields([query.get_meta().pk.name])
+ # Recheck the count - it is possible that fiddling with the select
+ # fields above removes tables from the query. Refs #18304.
+ count = query.count_active_tables()
+ if not self.query.related_updates and count == 1:
+ return
+
+ must_pre_select = count > 1 and not self.connection.features.update_can_self_select
+
+ # Now we adjust the current query: reset the where clause and get rid
+ # of all the tables we don't need (since they're in the sub-select).
+ self.query.where = self.query.where_class()
+ if self.query.related_updates or must_pre_select:
+ # Either we're using the idents in multiple update queries (so
+ # don't want them to change), or the db backend doesn't support
+ # selecting from the updating table (e.g. MySQL).
+ idents = []
+ for rows in query.get_compiler(self.using).execute_sql(MULTI):
+ idents.extend([r[0] for r in rows])
+ self.query.add_filter(('pk__in', idents))
+ self.query.related_ids = idents
+ else:
+ # The fast path. Filters and updates in one query.
+ self.query.add_filter(('pk__in', query))
+ for alias in self.query.tables[1:]:
+ self.query.alias_refcount[alias] = 0
+
+class SQLAggregateCompiler(SQLCompiler):
+ def as_sql(self, qn=None):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ if qn is None:
+ qn = self.quote_name_unless_alias
+
+ sql, params = [], []
+ for aggregate in self.query.aggregate_select.values():
+ agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
+ sql.append(agg_sql)
+ params.extend(agg_params)
+ sql = ', '.join(sql)
+ params = tuple(params)
+
+ sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
+ params = params + self.query.sub_params
+ return sql, params
+
+class SQLDateCompiler(SQLCompiler):
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ if resolve_columns:
+ from django.db.models.fields import DateField
+ fields = [DateField()]
+ else:
+ from django.db.backends.util import typecast_date
+ needs_string_cast = self.connection.features.needs_datetime_string_cast
+
+ offset = len(self.query.extra_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ date = row[offset]
+ if resolve_columns:
+ date = self.resolve_columns(row, fields)[offset]
+ elif needs_string_cast:
+ date = typecast_date(str(date))
+ if isinstance(date, datetime.datetime):
+ date = date.date()
+ yield date
+
+class SQLDateTimeCompiler(SQLCompiler):
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ if resolve_columns:
+ from django.db.models.fields import DateTimeField
+ fields = [DateTimeField()]
+ else:
+ from django.db.backends.util import typecast_timestamp
+ needs_string_cast = self.connection.features.needs_datetime_string_cast
+
+ offset = len(self.query.extra_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ datetime = row[offset]
+ if resolve_columns:
+ datetime = self.resolve_columns(row, fields)[offset]
+ elif needs_string_cast:
+ datetime = typecast_timestamp(str(datetime))
+                # Datetimes are artificially returned in UTC on databases that
+ # don't support time zone. Restore the zone used in the query.
+ if settings.USE_TZ:
+ if datetime is None:
+ raise ValueError("Database returned an invalid value "
+ "in QuerySet.datetimes(). Are time zone "
+ "definitions for your database and pytz installed?")
+ datetime = datetime.replace(tzinfo=None)
+ datetime = timezone.make_aware(datetime, self.query.tzinfo)
+ yield datetime
+
+def order_modified_iter(cursor, trim, sentinel):
+ """
+ Yields blocks of rows from a cursor. We use this iterator in the special
+ case when extra output columns have been added to support ordering
+ requirements. We must trim those extra columns before anything else can use
+ the results, since they're only needed to make the SQL valid.
+ """
+ for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
+ sentinel):
+ yield [r[:-trim] for r in rows]
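+
+# Trimming sketch: with trim=1, a fetched block [(1, 'a', 0), (2, 'b', 0)]
+# is yielded as [(1, 'a'), (2, 'b')] -- the trailing ordering-only columns
+# are dropped before rows reach the caller.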
diff --git a/lib/python2.7/site-packages/django/db/models/sql/constants.py b/lib/python2.7/site-packages/django/db/models/sql/constants.py
new file mode 100644
index 0000000..904f7b2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/constants.py
@@ -0,0 +1,41 @@
+"""
+Constants specific to the SQL storage portion of the ORM.
+"""
+
+from collections import namedtuple
+import re
+
+# Valid query types (a set is used for speedy lookups). These are (currently)
+# considered SQL-specific; other storage systems may choose to use different
+# lookup types.
+QUERY_TERMS = set([
+ 'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
+ 'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
+ 'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
+ 'regex', 'iregex',
+])
+
+# Size of each "chunk" for get_iterator calls.
+# Larger values are slightly faster at the expense of more storage space.
+GET_ITERATOR_CHUNK_SIZE = 100
+
+# Namedtuples for sql.* internal use.
+
+# Join lists (indexes into the tuples that are values in the alias_map
+# dictionary in the Query class).
+JoinInfo = namedtuple('JoinInfo',
+ 'table_name rhs_alias join_type lhs_alias '
+ 'join_cols nullable join_field')
+
+# Pairs of column clauses to select, and (possibly None) field for the clause.
+SelectInfo = namedtuple('SelectInfo', 'col field')
+
+# How many results to expect from a cursor.execute call
+MULTI = 'multi'
+SINGLE = 'single'
+
+ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
+ORDER_DIR = {
+ 'ASC': ('ASC', 'DESC'),
+ 'DESC': ('DESC', 'ASC'),
+}
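+
+# ORDER_PATTERN accepts '?' (random ordering) and optionally signed
+# dotted/word paths, e.g.:
+#
+#     ORDER_PATTERN.match('-created')    # matches
+#     ORDER_PATTERN.match('author.name') # matches ('.' is allowed)
+#     ORDER_PATTERN.match('id; DROP')    # None -- rejects injection attempts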
diff --git a/lib/python2.7/site-packages/django/db/models/sql/datastructures.py b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py
new file mode 100644
index 0000000..daaabbe
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py
@@ -0,0 +1,62 @@
+"""
+Useful auxiliary data structures for query construction. Not useful outside
+the SQL domain.
+"""
+
+class EmptyResultSet(Exception):
+ pass
+
+class MultiJoin(Exception):
+ """
+ Used by join construction code to indicate the point at which a
+ multi-valued join was attempted (if the caller wants to treat that
+ exceptionally).
+ """
+ def __init__(self, names_pos, path_with_names):
+ self.level = names_pos
+        # The path travelled; this includes the path to the multijoin.
+ self.names_with_path = path_with_names
+
+class Empty(object):
+ pass
+
+class RawValue(object):
+ def __init__(self, value):
+ self.value = value
+
+class Date(object):
+ """
+ Add a date selection column.
+ """
+ def __init__(self, col, lookup_type):
+ self.col = col
+ self.lookup_type = lookup_type
+
+ def relabeled_clone(self, change_map):
+ return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
+
+ def as_sql(self, qn, connection):
+ if isinstance(self.col, (list, tuple)):
+ col = '%s.%s' % tuple([qn(c) for c in self.col])
+ else:
+ col = self.col
+ return connection.ops.date_trunc_sql(self.lookup_type, col), []
+
+class DateTime(object):
+ """
+ Add a datetime selection column.
+ """
+ def __init__(self, col, lookup_type, tzname):
+ self.col = col
+ self.lookup_type = lookup_type
+ self.tzname = tzname
+
+ def relabeled_clone(self, change_map):
+ return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
+
+ def as_sql(self, qn, connection):
+ if isinstance(self.col, (list, tuple)):
+ col = '%s.%s' % tuple([qn(c) for c in self.col])
+ else:
+ col = self.col
+ return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname)
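+
+# A rendering sketch (hypothetical column; output is backend-specific): on
+# PostgreSQL, Date(('news_story', 'pub_date'), 'month').as_sql(qn, connection)
+# returns something like 'DATE_TRUNC(\'month\', "news_story"."pub_date")'
+# with an empty parameter list.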
diff --git a/lib/python2.7/site-packages/django/db/models/sql/expressions.py b/lib/python2.7/site-packages/django/db/models/sql/expressions.py
new file mode 100644
index 0000000..31e0899
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/expressions.py
@@ -0,0 +1,117 @@
+from django.core.exceptions import FieldError
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.fields import FieldDoesNotExist
+import copy
+
+class SQLEvaluator(object):
+ def __init__(self, expression, query, allow_joins=True, reuse=None):
+ self.expression = expression
+ self.opts = query.get_meta()
+ self.reuse = reuse
+ self.cols = []
+ self.expression.prepare(self, query, allow_joins)
+
+ def relabeled_clone(self, change_map):
+ clone = copy.copy(self)
+ clone.cols = []
+ for node, col in self.cols:
+ if hasattr(col, 'relabeled_clone'):
+ clone.cols.append((node, col.relabeled_clone(change_map)))
+ else:
+ clone.cols.append((node,
+ (change_map.get(col[0], col[0]), col[1])))
+ return clone
+
+ def get_cols(self):
+ cols = []
+ for node, col in self.cols:
+ if hasattr(node, 'get_cols'):
+ cols.extend(node.get_cols())
+ elif isinstance(col, tuple):
+ cols.append(col)
+ return cols
+
+ def prepare(self):
+ return self
+
+ def as_sql(self, qn, connection):
+ return self.expression.evaluate(self, qn, connection)
+
+    ######################################################
+    # Visitor methods for initial expression preparation #
+    ######################################################
+
+ def prepare_node(self, node, query, allow_joins):
+ for child in node.children:
+ if hasattr(child, 'prepare'):
+ child.prepare(self, query, allow_joins)
+
+ def prepare_leaf(self, node, query, allow_joins):
+ if not allow_joins and LOOKUP_SEP in node.name:
+ raise FieldError("Joined field references are not permitted in this query")
+
+ field_list = node.name.split(LOOKUP_SEP)
+ if node.name in query.aggregates:
+ self.cols.append((node, query.aggregate_select[node.name]))
+ else:
+ try:
+ field, sources, opts, join_list, path = query.setup_joins(
+ field_list, query.get_meta(),
+ query.get_initial_alias(), self.reuse)
+ targets, _, join_list = query.trim_joins(sources, join_list, path)
+ if self.reuse is not None:
+ self.reuse.update(join_list)
+ for t in targets:
+ self.cols.append((node, (join_list[-1], t.column)))
+ except FieldDoesNotExist:
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (self.name,
+ [f.name for f in self.opts.fields]))
+
+    ###################################################
+    # Visitor methods for final expression evaluation #
+    ###################################################
+
+ def evaluate_node(self, node, qn, connection):
+ expressions = []
+ expression_params = []
+ for child in node.children:
+ if hasattr(child, 'evaluate'):
+ sql, params = child.evaluate(self, qn, connection)
+ else:
+ sql, params = '%s', (child,)
+
+ if len(getattr(child, 'children', [])) > 1:
+ format = '(%s)'
+ else:
+ format = '%s'
+
+ if sql:
+ expressions.append(format % sql)
+ expression_params.extend(params)
+
+ return connection.ops.combine_expression(node.connector, expressions), expression_params
+
+ def evaluate_leaf(self, node, qn, connection):
+ col = None
+ for n, c in self.cols:
+ if n is node:
+ col = c
+ break
+ if col is None:
+ raise ValueError("Given node not found")
+ if hasattr(col, 'as_sql'):
+ return col.as_sql(qn, connection)
+ else:
+ return '%s.%s' % (qn(col[0]), qn(col[1])), []
+
+ def evaluate_date_modifier_node(self, node, qn, connection):
+ timedelta = node.children.pop()
+ sql, params = self.evaluate_node(node, qn, connection)
+ node.children.append(timedelta)
+
+ if timedelta.days == 0 and timedelta.seconds == 0 and \
+ timedelta.microseconds == 0:
+ return sql, params
+
+ return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
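+
+# SQLEvaluator backs F() expressions at the SQL layer. A usage sketch
+# (hypothetical model/field names):
+#
+#     from django.db.models import F
+#     Book.objects.update(price=F('price') * 2)
+#     # SQLUpdateCompiler wraps the right-hand side in an SQLEvaluator and
+#     # renders roughly: SET "price" = "app_book"."price" * %s  (params: [2])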
diff --git a/lib/python2.7/site-packages/django/db/models/sql/query.py b/lib/python2.7/site-packages/django/db/models/sql/query.py
new file mode 100644
index 0000000..7868c19
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/query.py
@@ -0,0 +1,1922 @@
+"""
+Create SQL statements for QuerySets.
+
+The code in here encapsulates all of the SQL construction so that QuerySets
+themselves do not have to (and could be backed by things other than SQL
+databases). The abstraction barrier only works one way: this module has to know
+all about the internals of models in order to get the information it needs.
+"""
+
+import copy
+
+from django.utils.datastructures import SortedDict
+from django.utils.encoding import force_text
+from django.utils.tree import Node
+from django.utils import six
+from django.db import connections, DEFAULT_DB_ALIAS
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.aggregates import refs_aggregate
+from django.db.models.expressions import ExpressionNode
+from django.db.models.fields import FieldDoesNotExist
+from django.db.models.related import PathInfo
+from django.db.models.sql import aggregates as base_aggregates_module
+from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
+ ORDER_PATTERN, JoinInfo, SelectInfo)
+from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
+from django.db.models.sql.expressions import SQLEvaluator
+from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
+ ExtraWhere, AND, OR, EmptyWhere)
+from django.core.exceptions import FieldError
+
+__all__ = ['Query', 'RawQuery']
+
+
+class RawQuery(object):
+ """
+ A single raw SQL query
+ """
+
+ def __init__(self, sql, using, params=None):
+ self.params = params or ()
+ self.sql = sql
+ self.using = using
+ self.cursor = None
+
+ # Mirror some properties of a normal query so that
+ # the compiler can be used to process results.
+ self.low_mark, self.high_mark = 0, None # Used for offset/limit
+ self.extra_select = {}
+ self.aggregate_select = {}
+
+ def clone(self, using):
+ return RawQuery(self.sql, using, params=self.params)
+
+ def convert_values(self, value, field, connection):
+ """Convert the database-returned value into a type that is consistent
+ across database backends.
+
+ By default, this defers to the underlying backend operations, but
+ it can be overridden by Query classes for specific backends.
+ """
+ return connection.ops.convert_values(value, field)
+
+ def get_columns(self):
+ if self.cursor is None:
+ self._execute_query()
+ converter = connections[self.using].introspection.table_name_converter
+ return [converter(column_meta[0])
+ for column_meta in self.cursor.description]
+
+ def __iter__(self):
+ # Always execute a new query for a new iterator.
+ # This could be optimized with a cache at the expense of RAM.
+ self._execute_query()
+ if not connections[self.using].features.can_use_chunked_reads:
+ # If the database can't use chunked reads we need to make sure we
+ # evaluate the entire query up front.
+ result = list(self.cursor)
+ else:
+ result = self.cursor
+ return iter(result)
+
+ def __repr__(self):
+ return "<RawQuery: %r>" % (self.sql % tuple(self.params))
+
+ def _execute_query(self):
+ self.cursor = connections[self.using].cursor()
+ self.cursor.execute(self.sql, self.params)
+
+
+class Query(object):
+ """
+ A single SQL query.
+ """
+ # SQL join types. These are part of the class because their string forms
+ # vary from database to database and can be customised by a subclass.
+ INNER = 'INNER JOIN'
+ LOUTER = 'LEFT OUTER JOIN'
+
+ alias_prefix = 'T'
+ query_terms = QUERY_TERMS
+ aggregates_module = base_aggregates_module
+
+ compiler = 'SQLCompiler'
+
+ def __init__(self, model, where=WhereNode):
+ self.model = model
+ self.alias_refcount = {}
+ # alias_map is the most important data structure regarding joins.
+ # It's used for recording which joins exist in the query and what
+ # type they are. The key is the alias of the joined table (possibly
+ # the table name) and the value is JoinInfo from constants.py.
+ self.alias_map = {}
+ self.table_map = {} # Maps table names to list of aliases.
+ self.join_map = {}
+ self.default_cols = True
+ self.default_ordering = True
+ self.standard_ordering = True
+ self.used_aliases = set()
+ self.filter_is_sticky = False
+ self.included_inherited_models = {}
+
+ # SQL-related attributes
+ # Select and related select clauses as SelectInfo instances.
+ # The select is used for cases where we want to set up the select
+ # clause to contain other than default fields (values(), annotate(),
+ # subqueries...)
+ self.select = []
+ # The related_select_cols is used for columns needed for
+ # select_related - this is populated in compile stage.
+ self.related_select_cols = []
+ self.tables = [] # Aliases in the order they are created.
+ self.where = where()
+ self.where_class = where
+ self.group_by = None
+ self.having = where()
+ self.order_by = []
+ self.low_mark, self.high_mark = 0, None # Used for offset/limit
+ self.distinct = False
+ self.distinct_fields = []
+ self.select_for_update = False
+ self.select_for_update_nowait = False
+ self.select_related = False
+
+ # SQL aggregate-related attributes
+ self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
+ self.aggregate_select_mask = None
+ self._aggregate_select_cache = None
+
+ # Arbitrary maximum limit for select_related. Prevents infinite
+ # recursion. Can be changed by the depth parameter to select_related().
+ self.max_depth = 5
+
+ # These are for extensions. The contents are more or less appended
+ # verbatim to the appropriate clause.
+ self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
+ self.extra_select_mask = None
+ self._extra_select_cache = None
+
+ self.extra_tables = ()
+ self.extra_order_by = ()
+
+        # A tuple of a set of model field names and a flag: True if these
+        # are the fields to defer, or False if these are the only fields to
+        # load.
+ self.deferred_loading = (set(), True)
+
+ def __str__(self):
+ """
+ Returns the query as a string of SQL with the parameter values
+ substituted in (use sql_with_params() to see the unsubstituted string).
+
+ Parameter values won't necessarily be quoted correctly, since that is
+ done by the database interface at execution time.
+ """
+ sql, params = self.sql_with_params()
+ return sql % params
+
+ def sql_with_params(self):
+ """
+ Returns the query as an SQL string and the parameters that will be
+        substituted into the query.
+ """
+ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
+
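+    # Illustrative example (not part of the original code): for a Query
+    # built over a hypothetical Author model,
+    #     sql, params = query.sql_with_params()
+    # might yield ('SELECT ... WHERE "myapp_author"."id" = %s', (1,)),
+    # while str(query) interpolates the params into the SQL purely for
+    # display purposes.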
+ def __deepcopy__(self, memo):
+ result = self.clone(memo=memo)
+ memo[id(self)] = result
+ return result
+
+ def prepare(self):
+ return self
+
+ def get_compiler(self, using=None, connection=None):
+ if using is None and connection is None:
+ raise ValueError("Need either using or connection")
+ if using:
+ connection = connections[using]
+
+ # Check that the compiler will be able to execute the query
+ for alias, aggregate in self.aggregate_select.items():
+ connection.ops.check_aggregate_support(aggregate)
+
+ return connection.ops.compiler(self.compiler)(self, connection, using)
+
+ def get_meta(self):
+ """
+ Returns the Options instance (the model._meta) from which to start
+ processing. Normally, this is self.model._meta, but it can be changed
+ by subclasses.
+ """
+ return self.model._meta
+
+ def clone(self, klass=None, memo=None, **kwargs):
+ """
+ Creates a copy of the current instance. The 'kwargs' parameter can be
+ used by clients to update attributes after copying has taken place.
+ """
+ obj = Empty()
+ obj.__class__ = klass or self.__class__
+ obj.model = self.model
+ obj.alias_refcount = self.alias_refcount.copy()
+ obj.alias_map = self.alias_map.copy()
+ obj.table_map = self.table_map.copy()
+ obj.join_map = self.join_map.copy()
+ obj.default_cols = self.default_cols
+ obj.default_ordering = self.default_ordering
+ obj.standard_ordering = self.standard_ordering
+ obj.included_inherited_models = self.included_inherited_models.copy()
+ obj.select = self.select[:]
+ obj.related_select_cols = []
+ obj.tables = self.tables[:]
+ obj.where = self.where.clone()
+ obj.where_class = self.where_class
+ if self.group_by is None:
+ obj.group_by = None
+ else:
+ obj.group_by = self.group_by[:]
+ obj.having = self.having.clone()
+ obj.order_by = self.order_by[:]
+ obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
+ obj.distinct = self.distinct
+ obj.distinct_fields = self.distinct_fields[:]
+ obj.select_for_update = self.select_for_update
+ obj.select_for_update_nowait = self.select_for_update_nowait
+ obj.select_related = self.select_related
+ obj.related_select_cols = []
+ obj.aggregates = self.aggregates.copy()
+ if self.aggregate_select_mask is None:
+ obj.aggregate_select_mask = None
+ else:
+ obj.aggregate_select_mask = self.aggregate_select_mask.copy()
+ # _aggregate_select_cache cannot be copied, as doing so breaks the
+ # (necessary) state in which both aggregates and
+ # _aggregate_select_cache point to the same underlying objects.
+ # It will get re-populated in the cloned queryset the next time it's
+ # used.
+ obj._aggregate_select_cache = None
+ obj.max_depth = self.max_depth
+ obj.extra = self.extra.copy()
+ if self.extra_select_mask is None:
+ obj.extra_select_mask = None
+ else:
+ obj.extra_select_mask = self.extra_select_mask.copy()
+ if self._extra_select_cache is None:
+ obj._extra_select_cache = None
+ else:
+ obj._extra_select_cache = self._extra_select_cache.copy()
+ obj.extra_tables = self.extra_tables
+ obj.extra_order_by = self.extra_order_by
+ obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
+ if self.filter_is_sticky and self.used_aliases:
+ obj.used_aliases = self.used_aliases.copy()
+ else:
+ obj.used_aliases = set()
+ obj.filter_is_sticky = False
+
+ obj.__dict__.update(kwargs)
+ if hasattr(obj, '_setup_query'):
+ obj._setup_query()
+ return obj
+
+ def convert_values(self, value, field, connection):
+ """Convert the database-returned value into a type that is consistent
+ across database backends.
+
+ By default, this defers to the underlying backend operations, but
+ it can be overridden by Query classes for specific backends.
+ """
+ return connection.ops.convert_values(value, field)
+
+ def resolve_aggregate(self, value, aggregate, connection):
+ """Resolve the value of aggregates returned by the database to
+ consistent (and reasonable) types.
+
+ This is required because of the predisposition of certain backends
+ to return Decimal and long types when they are not needed.
+ """
+ if value is None:
+ if aggregate.is_ordinal:
+ return 0
+ # Return None as-is
+ return value
+ elif aggregate.is_ordinal:
+ # Any ordinal aggregate (e.g., count) returns an int
+ return int(value)
+ elif aggregate.is_computed:
+ # Any computed aggregate (e.g., avg) returns a float
+ return float(value)
+ else:
+ # Return value depends on the type of the field being processed.
+ return self.convert_values(value, aggregate.field, connection)
+
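+    # Illustrative examples (not part of the original code), assuming a
+    # Count aggregate (is_ordinal) and an Avg aggregate (is_computed):
+    #     resolve_aggregate(None, count_agg, conn)          -> 0
+    #     resolve_aggregate(Decimal('7'), count_agg, conn)  -> 7
+    #     resolve_aggregate(Decimal('3.5'), avg_agg, conn)  -> 3.5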
+ def get_aggregation(self, using):
+ """
+ Returns the dictionary with the values of the existing aggregations.
+ """
+ if not self.aggregate_select:
+ return {}
+
+ # If there is a group by clause, aggregating does not add useful
+ # information but retrieves only the first row. Aggregate
+ # over the subquery instead.
+ if self.group_by is not None:
+ from django.db.models.sql.subqueries import AggregateQuery
+ query = AggregateQuery(self.model)
+
+ obj = self.clone()
+
+ # Remove any aggregates marked for reduction from the subquery
+ # and move them to the outer AggregateQuery.
+ for alias, aggregate in self.aggregate_select.items():
+ if aggregate.is_summary:
+ query.aggregate_select[alias] = aggregate
+ del obj.aggregate_select[alias]
+
+ try:
+ query.add_subquery(obj, using)
+ except EmptyResultSet:
+ return dict(
+ (alias, None)
+ for alias in query.aggregate_select
+ )
+ else:
+ query = self
+ self.select = []
+ self.default_cols = False
+ self.extra = {}
+ self.remove_inherited_models()
+
+ query.clear_ordering(True)
+ query.clear_limits()
+ query.select_for_update = False
+ query.select_related = False
+ query.related_select_cols = []
+
+ result = query.get_compiler(using).execute_sql(SINGLE)
+ if result is None:
+ result = [None for q in query.aggregate_select.items()]
+
+ return dict([
+ (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
+ for (alias, aggregate), val
+ in zip(query.aggregate_select.items(), result)
+ ])
+
+ def get_count(self, using):
+ """
+ Performs a COUNT() query using the current filter constraints.
+ """
+ obj = self.clone()
+ if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
+ # If a select clause exists, then the query has already started to
+ # specify the columns that are to be returned.
+ # In this case, we need to use a subquery to evaluate the count.
+ from django.db.models.sql.subqueries import AggregateQuery
+ subquery = obj
+ subquery.clear_ordering(True)
+ subquery.clear_limits()
+
+ obj = AggregateQuery(obj.model)
+ try:
+ obj.add_subquery(subquery, using=using)
+ except EmptyResultSet:
+                # add_subquery evaluates the query; if it raises
+                # EmptyResultSet then there can be no results, and
+                # therefore the count is obviously 0.
+ return 0
+
+ obj.add_count_column()
+ number = obj.get_aggregation(using=using)[None]
+
+ # Apply offset and limit constraints manually, since using LIMIT/OFFSET
+ # in SQL (in variants that provide them) doesn't change the COUNT
+ # output.
+ number = max(0, number - self.low_mark)
+ if self.high_mark is not None:
+ number = min(number, self.high_mark - self.low_mark)
+
+ return number
+
+ def has_results(self, using):
+ q = self.clone()
+ q.clear_select_clause()
+ q.add_extra({'a': 1}, None, None, None, None, None)
+ q.set_extra_mask(['a'])
+ q.clear_ordering(True)
+ q.set_limits(high=1)
+ compiler = q.get_compiler(using=using)
+ return bool(compiler.execute_sql(SINGLE))
+
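+    # Illustrative note (not part of the original code): the clause set up
+    # above compiles to SQL shaped roughly like
+    #     SELECT (1) AS "a" FROM ... WHERE ... LIMIT 1
+    # so existence can be checked without fetching full rows.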
+ def combine(self, rhs, connector):
+ """
+        Merge the 'rhs' query into the current one, with any 'rhs' effects
+ being applied *after* (that is, "to the right of") anything in the
+ current query. 'rhs' is not modified during a call to this function.
+
+ The 'connector' parameter describes how to connect filters from the
+ 'rhs' query.
+ """
+ assert self.model == rhs.model, \
+ "Cannot combine queries on two different base models."
+ assert self.can_filter(), \
+ "Cannot combine queries once a slice has been taken."
+ assert self.distinct == rhs.distinct, \
+ "Cannot combine a unique query with a non-unique query."
+ assert self.distinct_fields == rhs.distinct_fields, \
+ "Cannot combine queries with different distinct fields."
+
+ self.remove_inherited_models()
+ # Work out how to relabel the rhs aliases, if necessary.
+ change_map = {}
+ conjunction = (connector == AND)
+
+ # Determine which existing joins can be reused. When combining the
+ # query with AND we must recreate all joins for m2m filters. When
+        # combining with OR we can reuse joins. The reason is that in the AND
+        # case a single row can't fulfill a condition like:
+        #     revrel__col=1 & revrel__col=2
+        # But, there might be two different related rows matching this
+        # condition. In the OR case a single True is enough, so a single row
+        # is enough, too.
+ #
+ # Note that we will be creating duplicate joins for non-m2m joins in
+ # the AND case. The results will be correct but this creates too many
+ # joins. This is something that could be fixed later on.
+ reuse = set() if conjunction else set(self.tables)
+ # Base table must be present in the query - this is the same
+ # table on both sides.
+ self.get_initial_alias()
+ # Now, add the joins from rhs query into the new query (skipping base
+ # table).
+ for alias in rhs.tables[1:]:
+ table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
+ promote = (join_type == self.LOUTER)
+ # If the left side of the join was already relabeled, use the
+ # updated alias.
+ lhs = change_map.get(lhs, lhs)
+ new_alias = self.join(
+ (lhs, table, join_cols), reuse=reuse,
+ outer_if_first=not conjunction, nullable=nullable,
+ join_field=join_field)
+ if promote:
+ self.promote_joins([new_alias])
+ # We can't reuse the same join again in the query. If we have two
+ # distinct joins for the same connection in rhs query, then the
+ # combined query must have two joins, too.
+ reuse.discard(new_alias)
+ change_map[alias] = new_alias
+ if not rhs.alias_refcount[alias]:
+ # The alias was unused in the rhs query. Unref it so that it
+ # will be unused in the new query, too. We have to add and
+ # unref the alias so that join promotion has information of
+ # the join type for the unused alias.
+ self.unref_alias(new_alias)
+
+ # So that we don't exclude valid results in an OR query combination,
+ # all joins exclusive to either the lhs or the rhs must be converted
+ # to an outer join. RHS joins were already set to outer joins above,
+ # so check which joins were used only in the lhs query.
+ if not conjunction:
+ rhs_used_joins = set(change_map.values())
+ to_promote = [alias for alias in self.tables
+ if alias not in rhs_used_joins]
+ self.promote_joins(to_promote, True)
+
+ # Now relabel a copy of the rhs where-clause and add it to the current
+ # one.
+ if rhs.where:
+ w = rhs.where.clone()
+ w.relabel_aliases(change_map)
+ if not self.where:
+ # Since 'self' matches everything, add an explicit "include
+ # everything" where-constraint so that connections between the
+ # where clauses won't exclude valid results.
+ self.where.add(EverythingNode(), AND)
+ elif self.where:
+ # rhs has an empty where clause.
+ w = self.where_class()
+ w.add(EverythingNode(), AND)
+ else:
+ w = self.where_class()
+ self.where.add(w, connector)
+
+ # Selection columns and extra extensions are those provided by 'rhs'.
+ self.select = []
+ for col, field in rhs.select:
+ if isinstance(col, (list, tuple)):
+ new_col = change_map.get(col[0], col[0]), col[1]
+ self.select.append(SelectInfo(new_col, field))
+ else:
+ new_col = col.relabeled_clone(change_map)
+ self.select.append(SelectInfo(new_col, field))
+
+ if connector == OR:
+ # It would be nice to be able to handle this, but the queries don't
+ # really make sense (or return consistent value sets). Not worth
+ # the extra complexity when you can write a real query instead.
+ if self.extra and rhs.extra:
+ raise ValueError("When merging querysets using 'or', you "
+ "cannot have extra(select=...) on both sides.")
+ self.extra.update(rhs.extra)
+ extra_select_mask = set()
+ if self.extra_select_mask is not None:
+ extra_select_mask.update(self.extra_select_mask)
+ if rhs.extra_select_mask is not None:
+ extra_select_mask.update(rhs.extra_select_mask)
+ if extra_select_mask:
+ self.set_extra_mask(extra_select_mask)
+ self.extra_tables += rhs.extra_tables
+
+ # Ordering uses the 'rhs' ordering, unless it has none, in which case
+ # the current ordering is used.
+ self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
+ self.extra_order_by = rhs.extra_order_by or self.extra_order_by
+
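+    # Illustrative note (not part of the original code): combine() is what
+    # backs QuerySet composition, e.g.
+    #     qs1.query.combine(qs2.query, OR)    # roughly qs1 | qs2
+    # where the rhs joins are relabelled through change_map and, for OR,
+    # one-sided joins are promoted to LEFT OUTER so no rows are lost.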
+ def deferred_to_data(self, target, callback):
+ """
+        structure, describing the fields that *will* be loaded. This is used to
+ structure, describing the field that *will* be loaded. This is used to
+ compute the columns to select from the database and also by the
+ QuerySet class to work out which fields are being initialised on each
+ model. Models that have all their fields included aren't mentioned in
+ the result, only those that have field restrictions in place.
+
+ The "target" parameter is the instance that is populated (in place).
+ The "callback" is a function that is called whenever a (model, field)
+        pair needs to be added to "target". It accepts three parameters:
+ "target", and the model and list of fields being added for that model.
+ """
+ field_names, defer = self.deferred_loading
+ if not field_names:
+ return
+ orig_opts = self.get_meta()
+ seen = {}
+ must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
+ for field_name in field_names:
+ parts = field_name.split(LOOKUP_SEP)
+ cur_model = self.model
+ opts = orig_opts
+ for name in parts[:-1]:
+ old_model = cur_model
+ source = opts.get_field_by_name(name)[0]
+ if is_reverse_o2o(source):
+ cur_model = source.model
+ else:
+ cur_model = source.rel.to
+ opts = cur_model._meta
+ # Even if we're "just passing through" this model, we must add
+ # both the current model's pk and the related reference field
+ # (if it's not a reverse relation) to the things we select.
+ if not is_reverse_o2o(source):
+ must_include[old_model].add(source)
+ add_to_dict(must_include, cur_model, opts.pk)
+ field, model, _, _ = opts.get_field_by_name(parts[-1])
+ if model is None:
+ model = cur_model
+ if not is_reverse_o2o(field):
+ add_to_dict(seen, model, field)
+
+ if defer:
+ # We need to load all fields for each model, except those that
+ # appear in "seen" (for all models that appear in "seen"). The only
+ # slight complexity here is handling fields that exist on parent
+ # models.
+ workset = {}
+ for model, values in six.iteritems(seen):
+ for field, m in model._meta.get_fields_with_model():
+ if field in values:
+ continue
+ add_to_dict(workset, m or model, field)
+ for model, values in six.iteritems(must_include):
+ # If we haven't included a model in workset, we don't add the
+ # corresponding must_include fields for that model, since an
+ # empty set means "include all fields". That's why there's no
+ # "else" branch here.
+ if model in workset:
+ workset[model].update(values)
+ for model, values in six.iteritems(workset):
+ callback(target, model, values)
+ else:
+ for model, values in six.iteritems(must_include):
+ if model in seen:
+ seen[model].update(values)
+ else:
+ # As we've passed through this model, but not explicitly
+ # included any fields, we have to make sure it's mentioned
+ # so that only the "must include" fields are pulled in.
+ seen[model] = values
+ # Now ensure that every model in the inheritance chain is mentioned
+ # in the parent list. Again, it must be mentioned to ensure that
+ # only "must include" fields are pulled in.
+ for model in orig_opts.get_parent_list():
+ if model not in seen:
+ seen[model] = set()
+ for model, values in six.iteritems(seen):
+ callback(target, model, values)
+
+ def deferred_to_columns_cb(self, target, model, fields):
+ """
+ Callback used by deferred_to_columns(). The "target" parameter should
+ be a set instance.
+ """
+ table = model._meta.db_table
+ if table not in target:
+ target[table] = set()
+ for field in fields:
+ target[table].add(field.column)
+
+ def table_alias(self, table_name, create=False):
+ """
+ Returns a table alias for the given table_name and whether this is a
+ new alias or not.
+
+ If 'create' is true, a new alias is always created. Otherwise, the
+ most recently created alias for the table (if one exists) is reused.
+ """
+ current = self.table_map.get(table_name)
+ if not create and current:
+ alias = current[0]
+ self.alias_refcount[alias] += 1
+ return alias, False
+
+ # Create a new alias for this table.
+ if current:
+ alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
+ current.append(alias)
+ else:
+            # The first occurrence of a table uses the table name directly.
+ alias = table_name
+ self.table_map[alias] = [alias]
+ self.alias_refcount[alias] = 1
+ self.tables.append(alias)
+ return alias, True
+
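+    # Illustrative example (not part of the original code), for a
+    # hypothetical table 'myapp_book':
+    #     table_alias('myapp_book')              -> ('myapp_book', True)
+    #     table_alias('myapp_book')              -> ('myapp_book', False)
+    #     table_alias('myapp_book', create=True) -> ('T<n>', True)
+    # where the numeric suffix depends on how many aliases the query
+    # already tracks (alias_prefix is 'T' by default).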
+ def ref_alias(self, alias):
+ """ Increases the reference count for this alias. """
+ self.alias_refcount[alias] += 1
+
+ def unref_alias(self, alias, amount=1):
+ """ Decreases the reference count for this alias. """
+ self.alias_refcount[alias] -= amount
+
+ def promote_joins(self, aliases, unconditional=False):
+ """
+        Recursively promotes the join type of the given aliases and their
+        children to an outer join. If 'unconditional' is False, a join is
+        only promoted if it is nullable or the parent join is an outer join.
+
+ Note about join promotion: When promoting any alias, we make sure all
+ joins which start from that alias are promoted, too. When adding a join
+ in join(), we make sure any join added to already existing LOUTER join
+ is generated as LOUTER. This ensures we don't ever have broken join
+        chains which contain first a LOUTER join, then an INNER JOIN; that is,
+        this kind of join should never be generated: a LOUTER b INNER c. The
+ reason for avoiding this type of join chain is that the INNER after
+ the LOUTER will effectively remove any effect the LOUTER had.
+ """
+ aliases = list(aliases)
+ while aliases:
+ alias = aliases.pop(0)
+ if self.alias_map[alias].join_cols[0][1] is None:
+ # This is the base table (first FROM entry) - this table
+ # isn't really joined at all in the query, so we should not
+ # alter its join type.
+ continue
+ parent_alias = self.alias_map[alias].lhs_alias
+ parent_louter = (parent_alias
+ and self.alias_map[parent_alias].join_type == self.LOUTER)
+ already_louter = self.alias_map[alias].join_type == self.LOUTER
+ if ((unconditional or self.alias_map[alias].nullable
+ or parent_louter) and not already_louter):
+ data = self.alias_map[alias]._replace(join_type=self.LOUTER)
+ self.alias_map[alias] = data
+ # Join type of 'alias' changed, so re-examine all aliases that
+ # refer to this one.
+ aliases.extend(
+ join for join in self.alias_map.keys()
+ if (self.alias_map[join].lhs_alias == alias
+ and join not in aliases))
+
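+    # Illustrative note (not part of the original code): given a chain
+    #     t1 INNER t2 INNER t3
+    # promote_joins(['t2']) turns t2 into a LEFT OUTER join and, because
+    # t3 hangs off t2, re-examines and promotes t3 as well, avoiding the
+    # broken shape t1 LOUTER t2 INNER t3 described above.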
+ def reset_refcounts(self, to_counts):
+ """
+        Resets reference counts for aliases so that they match the values
+        passed in the 'to_counts' mapping.
+ """
+ for alias, cur_refcount in self.alias_refcount.copy().items():
+ unref_amount = cur_refcount - to_counts.get(alias, 0)
+ self.unref_alias(alias, unref_amount)
+
+ def promote_disjunction(self, aliases_before, alias_usage_counts,
+ num_childs):
+ """
+ This method is to be used for promoting joins in ORed filters.
+
+        The principle for promotion is: any alias which is used (that is, it
+        appears in alias_usage_counts), is not used by every child of the ORed
+        filter, and isn't pre-existing must be promoted to a LOUTER join.
+ """
+ for alias, use_count in alias_usage_counts.items():
+ if use_count < num_childs and alias not in aliases_before:
+ self.promote_joins([alias])
+
+ def change_aliases(self, change_map):
+ """
+ Changes the aliases in change_map (which maps old-alias -> new-alias),
+ relabelling any references to them in select columns and the where
+ clause.
+ """
+ assert set(change_map.keys()).intersection(set(change_map.values())) == set()
+
+ def relabel_column(col):
+ if isinstance(col, (list, tuple)):
+ old_alias = col[0]
+ return (change_map.get(old_alias, old_alias), col[1])
+ else:
+ return col.relabeled_clone(change_map)
+ # 1. Update references in "select" (normal columns plus aliases),
+ # "group by", "where" and "having".
+ self.where.relabel_aliases(change_map)
+ self.having.relabel_aliases(change_map)
+ if self.group_by:
+ self.group_by = [relabel_column(col) for col in self.group_by]
+ self.select = [SelectInfo(relabel_column(s.col), s.field)
+ for s in self.select]
+ self.aggregates = SortedDict(
+ (key, relabel_column(col)) for key, col in self.aggregates.items())
+
+ # 2. Rename the alias in the internal table/alias datastructures.
+ for ident, aliases in self.join_map.items():
+ del self.join_map[ident]
+ aliases = tuple([change_map.get(a, a) for a in aliases])
+ ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
+ self.join_map[ident] = aliases
+ for old_alias, new_alias in six.iteritems(change_map):
+ alias_data = self.alias_map[old_alias]
+ alias_data = alias_data._replace(rhs_alias=new_alias)
+ self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
+ del self.alias_refcount[old_alias]
+ self.alias_map[new_alias] = alias_data
+ del self.alias_map[old_alias]
+
+ table_aliases = self.table_map[alias_data.table_name]
+ for pos, alias in enumerate(table_aliases):
+ if alias == old_alias:
+ table_aliases[pos] = new_alias
+ break
+ for pos, alias in enumerate(self.tables):
+ if alias == old_alias:
+ self.tables[pos] = new_alias
+ break
+ for key, alias in self.included_inherited_models.items():
+ if alias in change_map:
+ self.included_inherited_models[key] = change_map[alias]
+
+ # 3. Update any joins that refer to the old alias.
+ for alias, data in six.iteritems(self.alias_map):
+ lhs = data.lhs_alias
+ if lhs in change_map:
+ data = data._replace(lhs_alias=change_map[lhs])
+ self.alias_map[alias] = data
+
+ def bump_prefix(self, exceptions=()):
+ """
+ Changes the alias prefix to the next letter in the alphabet and
+ relabels all the aliases. Even tables that previously had no alias will
+ get an alias after this call (it's mostly used for nested queries and
+ the outer query will already be using the non-aliased table name).
+
+ Subclasses who create their own prefix should override this method to
+ produce a similar result (a new prefix and relabelled aliases).
+
+ The 'exceptions' parameter is a container that holds alias names which
+ should not be changed.
+ """
+ current = ord(self.alias_prefix)
+ assert current < ord('Z')
+ prefix = chr(current + 1)
+ self.alias_prefix = prefix
+ change_map = SortedDict()
+ for pos, alias in enumerate(self.tables):
+ if alias in exceptions:
+ continue
+ new_alias = '%s%d' % (prefix, pos)
+ change_map[alias] = new_alias
+ self.tables[pos] = new_alias
+ self.change_aliases(change_map)
+
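+    # Illustrative example (not part of the original code): with
+    # alias_prefix 'T' and tables ['myapp_book', 'T1'], bump_prefix()
+    # sets alias_prefix to 'U' and relabels the tables to ['U0', 'U1']
+    # (aliases listed in 'exceptions' are left untouched).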
+ def get_initial_alias(self):
+ """
+ Returns the first alias for this query, after increasing its reference
+ count.
+ """
+ if self.tables:
+ alias = self.tables[0]
+ self.ref_alias(alias)
+ else:
+ alias = self.join((None, self.get_meta().db_table, None))
+ return alias
+
+ def count_active_tables(self):
+ """
+ Returns the number of tables in this query with a non-zero reference
+ count. Note that after execution, the reference counts are zeroed, so
+ tables added in compiler will not be seen by this method.
+ """
+ return len([1 for count in self.alias_refcount.values() if count])
+
+ def join(self, connection, reuse=None, outer_if_first=False,
+ nullable=False, join_field=None):
+ """
+ Returns an alias for the join in 'connection', either reusing an
+ existing alias for that join or creating a new one. 'connection' is a
+ tuple (lhs, table, join_cols) where 'lhs' is either an existing
+ table alias or a table name. 'join_cols' is a tuple of tuples containing
+ columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
+ to the SQL equivalent of::
+
+ lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
+
+ The 'reuse' parameter can be either None which means all joins
+ (matching the connection) are reusable, or it can be a set containing
+ the aliases that can be reused.
+
+ If 'outer_if_first' is True and a new join is created, it will have the
+ LOUTER join type.
+
+ A join is always created as LOUTER if the lhs alias is LOUTER to make
+ sure we do not generate chains like t1 LOUTER t2 INNER t3.
+
+ If 'nullable' is True, the join can potentially involve NULL values and
+ is a candidate for promotion (to "left outer") when combining querysets.
+
+ The 'join_field' is the field we are joining along (if any).
+ """
+ lhs, table, join_cols = connection
+ assert lhs is None or join_field is not None
+ existing = self.join_map.get(connection, ())
+ if reuse is None:
+ reuse = existing
+ else:
+ reuse = [a for a in existing if a in reuse]
+ for alias in reuse:
+ if join_field and self.alias_map[alias].join_field != join_field:
+                # The join_map doesn't contain join_field (mainly because
+                # fields in Query structs are problematic in pickling), so
+                # check that the existing join was created using the same
+                # join_field as the join currently being added.
+ continue
+ self.ref_alias(alias)
+ return alias
+
+ # No reuse is possible, so we need a new alias.
+ alias, _ = self.table_alias(table, True)
+ if not lhs:
+ # Not all tables need to be joined to anything. No join type
+ # means the later columns are ignored.
+ join_type = None
+ elif outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
+ # We need to use LOUTER join if asked by outer_if_first or if the
+ # LHS table is left-joined in the query.
+ join_type = self.LOUTER
+ else:
+ join_type = self.INNER
+ join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
+ join_field)
+ self.alias_map[alias] = join
+ if connection in self.join_map:
+ self.join_map[connection] += (alias,)
+ else:
+ self.join_map[connection] = (alias,)
+ return alias
+
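+    # Illustrative examples (not part of the original code), for
+    # hypothetical tables and a hypothetical foreign key 'some_fk':
+    #     join((None, 'myapp_book', None))          # base table, no join type
+    #     join(('myapp_book', 'myapp_author',
+    #           (('author_id', 'id'),)),
+    #          join_field=some_fk)                  # INNER (or LOUTER) join
+    # The second call corresponds to SQL like
+    #     ... JOIN myapp_author ON myapp_book.author_id = myapp_author.id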
+ def setup_inherited_models(self):
+ """
+ If the model that is the basis for this QuerySet inherits other models,
+ we need to ensure that those other models have their tables included in
+ the query.
+
+ We do this as a separate step so that subclasses know which
+ tables are going to be active in the query, without needing to compute
+ all the select columns (this method is called from pre_sql_setup(),
+ whereas column determination is a later part, and side-effect, of
+ as_sql()).
+ """
+ opts = self.get_meta()
+ root_alias = self.tables[0]
+ seen = {None: root_alias}
+
+ for field, model in opts.get_fields_with_model():
+ if model not in seen:
+ self.join_parent_model(opts, model, root_alias, seen)
+ self.included_inherited_models = seen
+
+ def join_parent_model(self, opts, model, alias, seen):
+ """
+ Makes sure the given 'model' is joined in the query. If 'model' isn't
+ a parent of 'opts' or if it is None this method is a no-op.
+
+ The 'alias' is the root alias for starting the join, 'seen' is a dict
+ of model -> alias of existing joins. It must also contain a mapping
+ of None -> some alias. This will be returned in the no-op case.
+ """
+ if model in seen:
+ return seen[model]
+ chain = opts.get_base_chain(model)
+ if chain is None:
+ return alias
+ curr_opts = opts
+ for int_model in chain:
+ if int_model in seen:
+ return seen[int_model]
+            # Proxy models have elements in the base chain with no parents;
+            # assign the new options object and skip to the next base in
+            # that case.
+ if not curr_opts.parents[int_model]:
+ curr_opts = int_model._meta
+ continue
+ link_field = curr_opts.get_ancestor_link(int_model)
+ _, _, _, joins, _ = self.setup_joins(
+ [link_field.name], curr_opts, alias)
+ curr_opts = int_model._meta
+ alias = seen[int_model] = joins[-1]
+ return alias or seen[None]
+
+ def remove_inherited_models(self):
+ """
+ Undoes the effects of setup_inherited_models(). Should be called
+ whenever select columns (self.select) are set explicitly.
+ """
+ for key, alias in self.included_inherited_models.items():
+ if key:
+ self.unref_alias(alias)
+ self.included_inherited_models = {}
+
+ def add_aggregate(self, aggregate, model, alias, is_summary):
+ """
+ Adds a single aggregate expression to the Query
+ """
+ opts = model._meta
+ field_list = aggregate.lookup.split(LOOKUP_SEP)
+ if len(field_list) == 1 and aggregate.lookup in self.aggregates:
+ # Aggregate is over an annotation
+ field_name = field_list[0]
+ col = field_name
+ source = self.aggregates[field_name]
+ if not is_summary:
+ raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
+ aggregate.name, field_name, field_name))
+ elif ((len(field_list) > 1) or
+ (field_list[0] not in [i.name for i in opts.fields]) or
+ self.group_by is None or
+ not is_summary):
+ # If:
+ # - the field descriptor has more than one part (foo__bar), or
+ # - the field descriptor is referencing an m2m/m2o field, or
+ # - this is a reference to a model field (possibly inherited), or
+ # - this is an annotation over a model field
+ # then we need to explore the joins that are required.
+
+ field, sources, opts, join_list, path = self.setup_joins(
+ field_list, opts, self.get_initial_alias())
+
+ # Process the join chain to see if it can be trimmed
+ targets, _, join_list = self.trim_joins(sources, join_list, path)
+
+ # If the aggregate references a model or field that requires a join,
+ # those joins must be LEFT OUTER - empty join rows must be returned
+ # in order for zeros to be returned for those aggregates.
+ self.promote_joins(join_list, True)
+
+ col = targets[0].column
+ source = sources[0]
+ col = (join_list[-1], col)
+ else:
+ # The simplest cases. No joins required -
+ # just reference the provided column alias.
+ field_name = field_list[0]
+ source = opts.get_field(field_name)
+ col = field_name
+
+ # Add the aggregate to the query
+ aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
+
+ def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
+ can_reuse=None):
+ """
+ Builds a WhereNode for a single filter clause, but doesn't add it
+ to this Query. Query.add_q() will then add this filter to the where
+ or having Node.
+
+ The 'branch_negated' tells us if the current branch contains any
+ negations. This will be used to determine if subqueries are needed.
+
+ The 'current_negated' is used to determine if the current filter is
+ negated or not and this will be used to determine if IS NULL filtering
+ is needed.
+
+        The difference between current_negated and branch_negated is that
+        branch_negated is set on the first negation, but current_negated is
+        flipped for each negation.
+
+        Note that add_filter will not do any negating itself; that is done
+        higher up in the code, by add_q().
+
+ The 'can_reuse' is a set of reusable joins for multijoins.
+
+ The method will create a filter clause that can be added to the current
+ query. However, if the filter isn't added to the query then the caller
+ is responsible for unreffing the joins used.
+ """
+ arg, value = filter_expr
+ parts = arg.split(LOOKUP_SEP)
+ if not parts:
+ raise FieldError("Cannot parse keyword query %r" % arg)
+
+ # Work out the lookup type and remove it from the end of 'parts',
+ # if necessary.
+ lookup_type = 'exact' # Default lookup type
+ num_parts = len(parts)
+ if (len(parts) > 1 and parts[-1] in self.query_terms
+ and arg not in self.aggregates):
+ # Traverse the lookup query to distinguish related fields from
+ # lookup types.
+ lookup_model = self.model
+ for counter, field_name in enumerate(parts):
+ try:
+ lookup_field = lookup_model._meta.get_field(field_name)
+ except FieldDoesNotExist:
+ # Not a field. Bail out.
+ lookup_type = parts.pop()
+ break
+ # Unless we're at the end of the list of lookups, let's attempt
+ # to continue traversing relations.
+ if (counter + 1) < num_parts:
+ try:
+ lookup_model = lookup_field.rel.to
+ except AttributeError:
+ # Not a related field. Bail out.
+ lookup_type = parts.pop()
+ break
+
+ clause = self.where_class()
+        # Interpret '__exact=None' as the SQL 'IS NULL'; otherwise, reject all
+        # uses of None as a query value.
+ if value is None:
+ if lookup_type != 'exact':
+ raise ValueError("Cannot use None as a query value")
+ lookup_type = 'isnull'
+ value = True
+ elif callable(value):
+ value = value()
+ elif isinstance(value, ExpressionNode):
+ # If value is a query expression, evaluate it
+ value = SQLEvaluator(value, self, reuse=can_reuse)
+ # For Oracle '' is equivalent to null. The check needs to be done
+ # at this stage because join promotion can't be done at compiler
+ # stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
+ # can do here. Similar thing is done in is_nullable(), too.
+ if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
+ lookup_type == 'exact' and value == ''):
+ value = True
+ lookup_type = 'isnull'
+
+ for alias, aggregate in self.aggregates.items():
+ if alias in (parts[0], LOOKUP_SEP.join(parts)):
+ clause.add((aggregate, lookup_type, value), AND)
+ return clause
+
+ opts = self.get_meta()
+ alias = self.get_initial_alias()
+ allow_many = not branch_negated
+
+ try:
+ field, sources, opts, join_list, path = self.setup_joins(
+ parts, opts, alias, can_reuse, allow_many,
+ allow_explicit_fk=True)
+ if can_reuse is not None:
+ can_reuse.update(join_list)
+ except MultiJoin as e:
+ return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
+ can_reuse, e.names_with_path)
+
+ if (lookup_type == 'isnull' and value is True and not current_negated and
+ len(join_list) > 1):
+ # If the comparison is against NULL, we may need to use some left
+ # outer joins when creating the join chain. This is only done when
+ # needed, as it's less efficient at the database level.
+ self.promote_joins(join_list)
+
+ # Process the join list to see if we can remove any inner joins from
+ # the far end (fewer tables in a query is better). Note that join
+ # promotion must happen before join trimming to have the join type
+ # information available when reusing joins.
+ targets, alias, join_list = self.trim_joins(sources, join_list, path)
+
+ if hasattr(field, 'get_lookup_constraint'):
+ constraint = field.get_lookup_constraint(self.where_class, alias, targets, sources,
+ lookup_type, value)
+ else:
+ constraint = (Constraint(alias, targets[0].column, field), lookup_type, value)
+ clause.add(constraint, AND)
+ if current_negated and (lookup_type != 'isnull' or value is False):
+ self.promote_joins(join_list)
+ if (lookup_type != 'isnull' and (
+ self.is_nullable(targets[0]) or
+ self.alias_map[join_list[-1]].join_type == self.LOUTER)):
+ # The condition added here will be SQL like this:
+ # NOT (col IS NOT NULL), where the first NOT is added in
+ # upper layers of code. The reason for addition is that if col
+ # is null, then col != someval will result in SQL "unknown"
+ # which isn't the same as in Python. The Python None handling
+ # is wanted, and it can be gotten by
+ # (col IS NULL OR col != someval)
+ # <=>
+ # NOT (col IS NOT NULL AND col = someval).
+ clause.add((Constraint(alias, targets[0].column, None), 'isnull', False), AND)
+ return clause
+
+ def add_filter(self, filter_clause):
+ self.where.add(self.build_filter(filter_clause), 'AND')
+
+ def need_having(self, obj):
+ """
+ Returns whether or not all elements of this q_object need to be put
+ together in the HAVING clause.
+ """
+ if not isinstance(obj, Node):
+ return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)
+ or (hasattr(obj[1], 'contains_aggregate')
+ and obj[1].contains_aggregate(self.aggregates)))
+ return any(self.need_having(c) for c in obj.children)
+
+ def split_having_parts(self, q_object, negated=False):
+ """
+ Returns a list of q_objects which need to go into the having clause
+        instead of the where clause. Removes the split-out nodes from the
+ given q_object. Note that the q_object is altered, so cloning it is
+ needed.
+ """
+ having_parts = []
+ for c in q_object.children[:]:
+            # When constructing the having nodes we need to take care to
+            # preserve the negation status from the upper parts of the tree.
+ if isinstance(c, Node):
+ # For each negated child, flip the in_negated flag.
+ in_negated = c.negated ^ negated
+ if c.connector == OR and self.need_having(c):
+ # A subtree starting from OR clause must go into having in
+ # whole if any part of that tree references an aggregate.
+ q_object.children.remove(c)
+ having_parts.append(c)
+ c.negated = in_negated
+ else:
+ having_parts.extend(
+ self.split_having_parts(c, in_negated)[1])
+ elif self.need_having(c):
+ q_object.children.remove(c)
+ new_q = self.where_class(children=[c], negated=negated)
+ having_parts.append(new_q)
+ return q_object, having_parts
+
+ def add_q(self, q_object):
+ """
+ A preprocessor for the internal _add_q(). Responsible for
+ splitting the given q_object into where and having parts and
+ setting up some internal variables.
+ """
+ if not self.need_having(q_object):
+ where_part, having_parts = q_object, []
+ else:
+ where_part, having_parts = self.split_having_parts(
+ q_object.clone(), q_object.negated)
+ used_aliases = self.used_aliases
+ clause = self._add_q(where_part, used_aliases)
+ self.where.add(clause, AND)
+ for hp in having_parts:
+ clause = self._add_q(hp, used_aliases)
+ self.having.add(clause, AND)
+ if self.filter_is_sticky:
+ self.used_aliases = used_aliases
+
+ def _add_q(self, q_object, used_aliases, branch_negated=False,
+ current_negated=False):
+ """
+ Adds a Q-object to the current filter.
+ """
+ connector = q_object.connector
+ current_negated = current_negated ^ q_object.negated
+ branch_negated = branch_negated or q_object.negated
+ target_clause = self.where_class(connector=connector,
+ negated=q_object.negated)
+ # Treat case NOT (a AND b) like case ((NOT a) OR (NOT b)) for join
+ # promotion. See ticket #21748.
+ effective_connector = connector
+ if current_negated:
+ effective_connector = OR if effective_connector == AND else AND
+ if effective_connector == OR:
+ alias_usage_counts = dict()
+ aliases_before = set(self.tables)
+ for child in q_object.children:
+ if effective_connector == OR:
+ refcounts_before = self.alias_refcount.copy()
+ if isinstance(child, Node):
+ child_clause = self._add_q(
+ child, used_aliases, branch_negated,
+ current_negated)
+ else:
+ child_clause = self.build_filter(
+ child, can_reuse=used_aliases, branch_negated=branch_negated,
+ current_negated=current_negated)
+ target_clause.add(child_clause, connector)
+ if effective_connector == OR:
+ used = alias_diff(refcounts_before, self.alias_refcount)
+ for alias in used:
+ alias_usage_counts[alias] = alias_usage_counts.get(alias, 0) + 1
+ if effective_connector == OR:
+ self.promote_disjunction(aliases_before, alias_usage_counts,
+ len(q_object.children))
+ return target_clause
+
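+    # Illustrative note (not part of the original code): for a filter like
+    #     ~Q(a=1, b=2)        # NOT (a AND b)
+    # current_negated flips the effective connector to OR, matching
+    # (NOT a) OR (NOT b), so join promotion via promote_disjunction()
+    # treats the children as a disjunction.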
+ def names_to_path(self, names, opts, allow_many, allow_explicit_fk):
+ """
+        Walks the names path and turns it into PathInfo tuples. Note that a
+ single name in 'names' can generate multiple PathInfos (m2m for
+ example).
+
+        'names' is the path of names to travel, 'opts' is the model Options we
+ start the name resolving from, 'allow_many' and 'allow_explicit_fk'
+ are as for setup_joins().
+
+ Returns a list of PathInfo tuples. In addition returns the final field
+ (the last used join field), and target (which is a field guaranteed to
+ contain the same value as the final field).
+ """
+ path, names_with_path = [], []
+ for pos, name in enumerate(names):
+ cur_names_with_path = (name, [])
+ if name == 'pk':
+ name = opts.pk.name
+ try:
+ field, model, direct, m2m = opts.get_field_by_name(name)
+ except FieldDoesNotExist:
+ for f in opts.fields:
+ if allow_explicit_fk and name == f.attname:
+ # XXX: A hack to allow foo_id to work in values() for
+ # backwards compatibility purposes. If we dropped that
+ # feature, this could be removed.
+ field, model, direct, m2m = opts.get_field_by_name(f.name)
+ break
+ else:
+ available = opts.get_all_field_names() + list(self.aggregate_select)
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (name, ", ".join(available)))
+ # Check if we need any joins for concrete inheritance cases (the
+ # field lives in parent, but we are currently in one of its
+ # children)
+ if model:
+ # The field lives on a base class of the current model.
+ # Skip the chain of proxy to the concrete proxied model
+ proxied_model = opts.concrete_model
+
+ for int_model in opts.get_base_chain(model):
+ if int_model is proxied_model:
+ opts = int_model._meta
+ else:
+ final_field = opts.parents[int_model]
+ targets = (final_field.rel.get_related_field(),)
+ opts = int_model._meta
+ path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
+ cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
+ if hasattr(field, 'get_path_info'):
+ pathinfos = field.get_path_info()
+ if not allow_many:
+ for inner_pos, p in enumerate(pathinfos):
+ if p.m2m:
+ cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
+ names_with_path.append(cur_names_with_path)
+ raise MultiJoin(pos + 1, names_with_path)
+ last = pathinfos[-1]
+ path.extend(pathinfos)
+ final_field = last.join_field
+ opts = last.to_opts
+ targets = last.target_fields
+ cur_names_with_path[1].extend(pathinfos)
+ names_with_path.append(cur_names_with_path)
+ else:
+ # Local non-relational field.
+ final_field = field
+ targets = (field,)
+ break
+
+ if pos != len(names) - 1:
+ if pos == len(names) - 2:
+ raise FieldError(
+ "Join on field %r not permitted. Did you misspell %r for "
+ "the lookup type?" % (name, names[pos + 1]))
+ else:
+ raise FieldError("Join on field %r not permitted." % name)
+ return path, final_field, targets
+
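+    # Illustrative example (not part of the original code): for a
+    # hypothetical Book model with a foreign key 'author',
+    #     names_to_path(['author', 'name'], opts, True, False)
+    # yields one PathInfo for the book -> author join, with final_field
+    # being Author.name and targets == (Author.name,).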
+ def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
+ allow_explicit_fk=False, outer_if_first=False):
+ """
+ Compute the necessary table joins for the passage through the fields
+ given in 'names'. 'opts' is the Options class for the current model
+ (which gives the table we are starting from), 'alias' is the alias for
+ the table to start the joining from.
+
+ The 'can_reuse' defines the reverse foreign key joins we can reuse. It
+        can be None, in which case all joins are reusable, or a set of aliases
+ that can be reused. Note that non-reverse foreign keys are always
+ reusable when using setup_joins().
+
+ If 'allow_many' is False, then any reverse foreign key seen will
+ generate a MultiJoin exception.
+
+        The 'allow_explicit_fk' controls whether field.attname is allowed in
+        the lookups.
+
+ Returns the final field involved in the joins, the target field (used
+ for any 'where' constraint), the final 'opts' value, the joins and the
+ field path travelled to generate the joins.
+
+        The target field is the field containing the concrete value. The
+        final field can be something different, for example a foreign key
+        pointing to that value. The final field is needed, for example, in
+        some value conversions (converting 'obj' in fk__id=obj to a pk value
+        using the foreign key field).
+ """
+ joins = [alias]
+ # First, generate the path for the names
+ path, final_field, targets = self.names_to_path(
+ names, opts, allow_many, allow_explicit_fk)
+ # Then, add the path to the query's joins. Note that we can't trim
+ # joins at this stage - we will need the information about join type
+ # of the trimmed joins.
+ for pos, join in enumerate(path):
+ opts = join.to_opts
+ if join.direct:
+ nullable = self.is_nullable(join.join_field)
+ else:
+ nullable = True
+ connection = alias, opts.db_table, join.join_field.get_joining_columns()
+ reuse = can_reuse if join.m2m else None
+ alias = self.join(
+ connection, reuse=reuse, nullable=nullable, join_field=join.join_field,
+ outer_if_first=outer_if_first)
+ joins.append(alias)
+ if hasattr(final_field, 'field'):
+ final_field = final_field.field
+ return final_field, targets, opts, joins, path
+
+ def trim_joins(self, targets, joins, path):
+ """
+        The 'targets' parameter contains the final fields being joined to,
+        'joins' is the full list of join aliases. The 'path' contains the
+        PathInfos used to create the joins.
+
+ Returns the final target field and table alias and the new active
+ joins.
+
+ We will always trim any direct join if we have the target column
+ available already in the previous table. Reverse joins can't be
+ trimmed as we don't know if there is anything on the other side of
+ the join.
+ """
+ for pos, info in enumerate(reversed(path)):
+ if len(joins) == 1 or not info.direct:
+ break
+ join_targets = set(t.column for t in info.join_field.foreign_related_fields)
+ cur_targets = set(t.column for t in targets)
+ if not cur_targets.issubset(join_targets):
+ break
+ targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
+ self.unref_alias(joins.pop())
+ return targets, joins[-1], joins
+
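+    # Illustrative example (not part of the original code): a filter on
+    # fk__id can be answered from the referencing table alone, since
+    # book.author_id already holds the target column; the trailing author
+    # join is popped and unreffed, and the constraint is applied against
+    # book.author_id instead.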
+ def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
+ """
+ When doing an exclude against any kind of N-to-many relation, we need
+ to use a subquery. This method constructs the nested query, given the
+ original exclude filter (filter_expr) and the portion up to the first
+ N-to-many relation field.
+
+        As an example, the original filter could be ~Q(child__name='foo').
+        We would get here with filter_expr = ('child__name', 'foo'),
+        prefix = 'child', and can_reuse a set of joins usable for filters
+        in the original query.
+
+        We will turn this into the equivalent of:
+ WHERE NOT (pk IN (SELECT parent_id FROM thetable
+ WHERE name = 'foo' AND parent_id IS NOT NULL))
+
+        It might be worthwhile to consider using WHERE NOT EXISTS instead, as
+        that has saner NULL handling and is easier for the backend's
+        optimizer to handle.
+ """
+ # Generate the inner query.
+ query = Query(self.model)
+ query.where.add(query.build_filter(filter_expr), AND)
+ query.bump_prefix()
+ query.clear_ordering(True)
+        # Try to keep the subquery as simple as possible: trim leading joins
+        # from the subquery.
+ trimmed_prefix, contains_louter = query.trim_start(names_with_path)
+ query.remove_inherited_models()
+
+        # Add an extra check to make sure the selected field will not be null,
+        # since we are adding an IN <subquery> clause. This prevents the
+        # database from tripping over IN (..., NULL, ...) selects and
+        # returning nothing.
+ if self.is_nullable(query.select[0].field):
+ alias, col = query.select[0].col
+ query.where.add((Constraint(alias, col, query.select[0].field), 'isnull', False), AND)
+
+ condition = self.build_filter(
+ ('%s__in' % trimmed_prefix, query),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ if contains_louter:
+ or_null_condition = self.build_filter(
+ ('%s__isnull' % trimmed_prefix, True),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ condition.add(or_null_condition, OR)
+ # Note that the end result will be:
+ # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
+ # This might look crazy but due to how IN works, this seems to be
+ # correct. If the IS NOT NULL check is removed then outercol NOT
+ # IN will return UNKNOWN. If the IS NULL check is removed, then if
+ # outercol IS NULL we will not match the row.
+ return condition
+
+ def set_empty(self):
+ self.where = EmptyWhere()
+ self.having = EmptyWhere()
+
+ def is_empty(self):
+ return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere)
+
+ def set_limits(self, low=None, high=None):
+ """
+ Adjusts the limits on the rows retrieved. We use low/high to set these,
+ as it makes it more Pythonic to read and write. When the SQL query is
+ created, they are converted to the appropriate offset and limit values.
+
+ Any limits passed in here are applied relative to the existing
+ constraints. So low is added to the current low value and both will be
+ clamped to any existing high value.
+ """
+ if high is not None:
+ if self.high_mark is not None:
+ self.high_mark = min(self.high_mark, self.low_mark + high)
+ else:
+ self.high_mark = self.low_mark + high
+ if low is not None:
+ if self.high_mark is not None:
+ self.low_mark = min(self.high_mark, self.low_mark + low)
+ else:
+ self.low_mark = self.low_mark + low
+
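+    # Illustrative example (not part of the original code): slicing composes
+    # relative to the existing window, mirroring qs[2:7][1:3]:
+    #     set_limits(low=2, high=7)   # (low_mark, high_mark) == (2, 7)
+    #     set_limits(low=1, high=3)   # (low_mark, high_mark) == (3, 5)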
+ def clear_limits(self):
+ """
+ Clears any existing limits.
+ """
+ self.low_mark, self.high_mark = 0, None
+
+ def can_filter(self):
+ """
+ Returns True if adding filters to this instance is still possible.
+
+ Typically, this means no limits or offsets have been put on the results.
+ """
+ return not self.low_mark and self.high_mark is None
+
+ def clear_select_clause(self):
+ """
+ Removes all fields from SELECT clause.
+ """
+ self.select = []
+ self.default_cols = False
+ self.select_related = False
+ self.set_extra_mask(())
+ self.set_aggregate_mask(())
+
+ def clear_select_fields(self):
+ """
+ Clears the list of fields to select (but not extra_select columns).
+ Some queryset types completely replace any existing list of select
+ columns.
+ """
+ self.select = []
+
+ def add_distinct_fields(self, *field_names):
+ """
+ Adds and resolves the given fields to the query's "distinct on" clause.
+ """
+ self.distinct_fields = field_names
+ self.distinct = True
+
+ def add_fields(self, field_names, allow_m2m=True):
+ """
+ Adds the given (model) fields to the select set. The field names are
+ added in the order specified.
+ """
+ alias = self.get_initial_alias()
+ opts = self.get_meta()
+
+ try:
+ for name in field_names:
+ field, targets, u2, joins, path = self.setup_joins(
+ name.split(LOOKUP_SEP), opts, alias, None, allow_m2m,
+ allow_explicit_fk=True, outer_if_first=True)
+
+ # Trim last join if possible
+ targets, final_alias, remaining_joins = self.trim_joins(targets, joins[-2:], path)
+ joins = joins[:-2] + remaining_joins
+
+ self.promote_joins(joins[1:])
+ for target in targets:
+ self.select.append(SelectInfo((final_alias, target.column), target))
+ except MultiJoin:
+ raise FieldError("Invalid field name: '%s'" % name)
+ except FieldError:
+ if LOOKUP_SEP in name:
+ # For lookups spanning over relationships, show the error
+ # from the model on which the lookup failed.
+ raise
+ else:
+ names = sorted(opts.get_all_field_names() + list(self.extra)
+ + list(self.aggregate_select))
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (name, ", ".join(names)))
+ self.remove_inherited_models()
+
+ def add_ordering(self, *ordering):
+ """
+ Adds items from the 'ordering' sequence to the query's "order by"
+ clause. These items are either field names (not column names) --
+ possibly with a direction prefix ('-' or '?') -- or ordinals,
+ corresponding to column positions in the 'select' list.
+
+ If 'ordering' is empty, all ordering is cleared from the query.
+ """
+ errors = []
+ for item in ordering:
+ if not ORDER_PATTERN.match(item):
+ errors.append(item)
+ if errors:
+ raise FieldError('Invalid order_by arguments: %s' % errors)
+ if ordering:
+ self.order_by.extend(ordering)
+ else:
+ self.default_ordering = False
+
+ def clear_ordering(self, force_empty):
+ """
+ Removes any ordering settings. If 'force_empty' is True, there will be
+ no ordering in the resulting query (not even the model's default).
+ """
+ self.order_by = []
+ self.extra_order_by = ()
+ if force_empty:
+ self.default_ordering = False
+
+ def set_group_by(self):
+ """
+ Expands the GROUP BY clause required by the query.
+
+ This will usually be the set of all non-aggregate fields in the
+ return data. If the database backend supports grouping by the
+ primary key, and the query would be equivalent, the optimization
+ will be made automatically.
+ """
+ self.group_by = []
+
+ for col, _ in self.select:
+ self.group_by.append(col)
+
+ def add_count_column(self):
+ """
+ Converts the query to do count(...) or count(distinct(pk)) in order to
+ get its size.
+ """
+ if not self.distinct:
+ if not self.select:
+ count = self.aggregates_module.Count('*', is_summary=True)
+ else:
+ assert len(self.select) == 1, \
+ "Cannot add count col with multiple cols in 'select': %r" % self.select
+ count = self.aggregates_module.Count(self.select[0].col)
+ else:
+ opts = self.get_meta()
+ if not self.select:
+ count = self.aggregates_module.Count(
+ (self.join((None, opts.db_table, None)), opts.pk.column),
+ is_summary=True, distinct=True)
+ else:
+ # Because of SQL portability issues, multi-column, distinct
+ # counts need a sub-query -- see get_count() for details.
+ assert len(self.select) == 1, \
+ "Cannot add count col with multiple cols in 'select'."
+
+ count = self.aggregates_module.Count(self.select[0].col, distinct=True)
+ # Distinct handling is done in Count(), so don't do it at this
+ # level.
+ self.distinct = False
+
+ # Set only aggregate to be the count column.
+ # Clear out the select cache to reflect the new unmasked aggregates.
+ self.aggregates = {None: count}
+ self.set_aggregate_mask(None)
+ self.group_by = None
+
+ def add_select_related(self, fields):
+ """
+ Sets up the select_related data structure so that we only select
+ certain related models (as opposed to all models, when
+ self.select_related=True).
+ """
+ field_dict = {}
+ for field in fields:
+ d = field_dict
+ for part in field.split(LOOKUP_SEP):
+ d = d.setdefault(part, {})
+ self.select_related = field_dict
+ self.related_select_cols = []
+
+ def add_extra(self, select, select_params, where, params, tables, order_by):
+ """
+ Adds data to the various extra_* attributes for user-created additions
+ to the query.
+ """
+ if select:
+ # We need to pair any placeholder markers in the 'select'
+ # dictionary with their parameters in 'select_params' so that
+ # subsequent updates to the select dictionary also adjust the
+ # parameters appropriately.
+ select_pairs = SortedDict()
+ if select_params:
+ param_iter = iter(select_params)
+ else:
+ param_iter = iter([])
+ for name, entry in select.items():
+ entry = force_text(entry)
+ entry_params = []
+ pos = entry.find("%s")
+ while pos != -1:
+ entry_params.append(next(param_iter))
+ pos = entry.find("%s", pos + 2)
+ select_pairs[name] = (entry, entry_params)
+            # This is order preserving, since self.extra is a SortedDict.
+ self.extra.update(select_pairs)
+ if where or params:
+ self.where.add(ExtraWhere(where, params), AND)
+ if tables:
+ self.extra_tables += tuple(tables)
+ if order_by:
+ self.extra_order_by = order_by
+
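+    # Illustrative example (not part of the original code): with
+    #     select={'double_id': 'id * %s'}, select_params=[2]
+    # the loop above pairs the entry with its parameter, storing
+    #     self.extra['double_id'] == ('id * %s', [2])
+    # so later edits to the select dict keep the params aligned.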
+ def clear_deferred_loading(self):
+ """
+ Remove any fields from the deferred loading set.
+ """
+ self.deferred_loading = (set(), True)
+
+ def add_deferred_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ exclude from loading from the database when automatic column selection
+ is done. The new field names are added to any existing field names that
+ are deferred (or removed from any existing field names that are marked
+ as the only ones for immediate loading).
+ """
+        # Fields on related models are stored in the literal double-underscore
+        # format, so that we can use a set data structure. We do the foo__bar
+        # splitting and handling when computing the SQL column names (as part
+        # of get_columns()).
+ existing, defer = self.deferred_loading
+ if defer:
+ # Add to existing deferred names.
+ self.deferred_loading = existing.union(field_names), True
+ else:
+ # Remove names from the set of any existing "immediate load" names.
+ self.deferred_loading = existing.difference(field_names), False
+
+ def add_immediate_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ retrieve when the SQL is executed ("immediate loading" fields). The
+ field names replace any existing immediate loading field names. If
+ there are field names already specified for deferred loading, those
+ names are removed from the new field_names before storing the new names
+ for immediate loading. (That is, immediate loading overrides any
+ existing immediate values, but respects existing deferrals.)
+ """
+ existing, defer = self.deferred_loading
+ field_names = set(field_names)
+ if 'pk' in field_names:
+ field_names.remove('pk')
+ field_names.add(self.get_meta().pk.name)
+
+ if defer:
+ # Remove any existing deferred names from the current set before
+ # setting the new names.
+ self.deferred_loading = field_names.difference(existing), False
+ else:
+ # Replace any existing "immediate load" field names.
+ self.deferred_loading = field_names, False
+
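+    # A minimal sketch of the two methods above, assuming a hypothetical
+    # model with fields 'headline' and 'body' (deferred_loading starts as
+    # (set(), True)):
+    #
+    #     query.add_deferred_loading({'body'})       # defer('body')
+    #     # deferred_loading == ({'body'}, True)
+    #     query.add_immediate_loading(['headline'])  # only('headline')
+    #     # deferred_loading == ({'headline'}, False)
+    #
+    # With the flag False, only the named fields are loaded immediately and
+    # everything else is deferred; 'pk' is translated to the concrete
+    # primary-key field name.
+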
+ def get_loaded_field_names(self):
+ """
+        If any fields are marked to be deferred, returns a dictionary mapping
+        models to a set of field names on those models that will be loaded.
+        If a model is not in the returned dictionary, none of its fields are
+        deferred.
+
+ If no fields are marked for deferral, returns an empty dictionary.
+ """
+ # We cache this because we call this function multiple times
+ # (compiler.fill_related_selections, query.iterator)
+ try:
+ return self._loaded_field_names_cache
+ except AttributeError:
+ collection = {}
+ self.deferred_to_data(collection, self.get_loaded_field_names_cb)
+ self._loaded_field_names_cache = collection
+ return collection
+
+ def get_loaded_field_names_cb(self, target, model, fields):
+ """
+        Callback used by get_loaded_field_names().
+ """
+ target[model] = set([f.name for f in fields])
+
+ def set_aggregate_mask(self, names):
+ "Set the mask of aggregates that will actually be returned by the SELECT"
+ if names is None:
+ self.aggregate_select_mask = None
+ else:
+ self.aggregate_select_mask = set(names)
+ self._aggregate_select_cache = None
+
+ def set_extra_mask(self, names):
+ """
+        Set the mask of extra select items that will be returned by SELECT.
+        We don't actually remove them from the Query since they might be used
+        later.
+ """
+ if names is None:
+ self.extra_select_mask = None
+ else:
+ self.extra_select_mask = set(names)
+ self._extra_select_cache = None
+
+ def _aggregate_select(self):
+        """The SortedDict of aggregate columns that are not masked and should
+        be used in the SELECT clause.
+
+ This result is cached for optimization purposes.
+ """
+ if self._aggregate_select_cache is not None:
+ return self._aggregate_select_cache
+ elif self.aggregate_select_mask is not None:
+ self._aggregate_select_cache = SortedDict([
+ (k,v) for k,v in self.aggregates.items()
+ if k in self.aggregate_select_mask
+ ])
+ return self._aggregate_select_cache
+ else:
+ return self.aggregates
+ aggregate_select = property(_aggregate_select)
+
+ def _extra_select(self):
+ if self._extra_select_cache is not None:
+ return self._extra_select_cache
+ elif self.extra_select_mask is not None:
+ self._extra_select_cache = SortedDict([
+ (k,v) for k,v in self.extra.items()
+ if k in self.extra_select_mask
+ ])
+ return self._extra_select_cache
+ else:
+ return self.extra
+ extra_select = property(_extra_select)
+
+ def trim_start(self, names_with_path):
+ """
+ Trims joins from the start of the join path. The candidates for trim
+ are the PathInfos in names_with_path structure that are m2m joins.
+
+ Also sets the select column so the start matches the join.
+
+ This method is meant to be used for generating the subquery joins &
+ cols in split_exclude().
+
+        Returns a lookup usable for doing outerq.filter(lookup=self). Also
+        returns a boolean indicating whether the joins in the prefix contain
+        a LEFT OUTER join.
+        """
+ all_paths = []
+ for _, paths in names_with_path:
+ all_paths.extend(paths)
+ contains_louter = False
+ for pos, path in enumerate(all_paths):
+ if path.m2m:
+ break
+ if self.alias_map[self.tables[pos + 1]].join_type == self.LOUTER:
+ contains_louter = True
+ self.unref_alias(self.tables[pos])
+            # The path.join_field is a Rel; let's get the other side's field.
+ join_field = path.join_field.field
+ # Build the filter prefix.
+ trimmed_prefix = []
+ paths_in_prefix = pos
+ for name, path in names_with_path:
+ if paths_in_prefix - len(path) < 0:
+ break
+ trimmed_prefix.append(name)
+ paths_in_prefix -= len(path)
+ trimmed_prefix.append(
+ join_field.foreign_related_fields[0].name)
+ trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
+        # Let's still see if we can trim the first join from the inner query
+ # (that is, self). We can't do this for LEFT JOINs because we would
+ # miss those rows that have nothing on the outer side.
+ if self.alias_map[self.tables[pos + 1]].join_type != self.LOUTER:
+ select_fields = [r[0] for r in join_field.related_fields]
+ select_alias = self.tables[pos + 1]
+ self.unref_alias(self.tables[pos])
+ extra_restriction = join_field.get_extra_restriction(
+ self.where_class, None, self.tables[pos + 1])
+ if extra_restriction:
+ self.where.add(extra_restriction, AND)
+ else:
+ # TODO: It might be possible to trim more joins from the start of the
+ # inner query if it happens to have a longer join chain containing the
+            # values in select_fields. Let's punt on this one for now.
+ select_fields = [r[1] for r in join_field.related_fields]
+ select_alias = self.tables[pos]
+ self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
+ return trimmed_prefix, contains_louter
+
+ def is_nullable(self, field):
+ """
+ A helper to check if the given field should be treated as nullable.
+
+ Some backends treat '' as null and Django treats such fields as
+ nullable for those backends. In such situations field.null can be
+ False even if we should treat the field as nullable.
+ """
+ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
+ # (nor should it have) knowledge of which connection is going to be
+ # used. The proper fix would be to defer all decisions where
+ # is_nullable() is needed to the compiler stage, but that is not easy
+ # to do currently.
+ if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
+ and field.empty_strings_allowed):
+ return True
+ else:
+ return field.null
+
+def get_order_dir(field, default='ASC'):
+ """
+ Returns the field name and direction for an order specification. For
+ example, '-foo' is returned as ('foo', 'DESC').
+
+ The 'default' param is used to indicate which way no prefix (or a '+'
+ prefix) should sort. The '-' prefix always sorts the opposite way.
+ """
+ dirn = ORDER_DIR[default]
+ if field[0] == '-':
+ return field[1:], dirn[1]
+ return field, dirn[0]
+
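+# A minimal sketch of get_order_dir() above:
+#
+#     get_order_dir('-pub_date')                 == ('pub_date', 'DESC')
+#     get_order_dir('pub_date')                  == ('pub_date', 'ASC')
+#     get_order_dir('pub_date', default='DESC')  == ('pub_date', 'DESC')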
+
+def add_to_dict(data, key, value):
+ """
+ A helper function to add "value" to the set of values for "key", whether or
+ not "key" already exists.
+ """
+ if key in data:
+ data[key].add(value)
+ else:
+ data[key] = set([value])
+
+def is_reverse_o2o(field):
+ """
+ A little helper to check if the given field is reverse-o2o. The field is
+ expected to be some sort of relation field or related object.
+ """
+ return not hasattr(field, 'rel') and field.field.unique
+
+def alias_diff(refcounts_before, refcounts_after):
+ """
+    Given before and after copies of the alias refcounts, works out which
+    aliases have been added in the after copy.
+ """
+ # Use -1 as default value so that any join that is created, then trimmed
+ # is seen as added.
+ return set(t for t in refcounts_after
+ if refcounts_after[t] > refcounts_before.get(t, -1))
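+
+# A minimal worked example of alias_diff() above: with
+#     refcounts_before == {'T1': 1}
+#     refcounts_after  == {'T1': 1, 'T2': 2, 'T3': 0}
+# the result is set(['T2', 'T3']) -- 'T3' counts as added because the -1
+# default treats a join that was created and then trimmed as new.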
diff --git a/lib/python2.7/site-packages/django/db/models/sql/subqueries.py b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
new file mode 100644
index 0000000..6dc0005
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
@@ -0,0 +1,297 @@
+"""
+Query subclasses which provide extra functionality beyond simple data retrieval.
+"""
+
+from django.conf import settings
+from django.core.exceptions import FieldError
+from django.db import connections
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist
+from django.db.models.sql.constants import *
+from django.db.models.sql.datastructures import Date, DateTime
+from django.db.models.sql.query import Query
+from django.db.models.sql.where import AND, Constraint
+from django.utils.functional import Promise
+from django.utils.encoding import force_text
+from django.utils import six
+from django.utils import timezone
+
+
+__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
+ 'DateTimeQuery', 'AggregateQuery']
+
+class DeleteQuery(Query):
+ """
+ Delete queries are done through this class, since they are more constrained
+ than general queries.
+ """
+
+ compiler = 'SQLDeleteCompiler'
+
+ def do_query(self, table, where, using):
+ self.tables = [table]
+ self.where = where
+ self.get_compiler(using).execute_sql(None)
+
+ def delete_batch(self, pk_list, using, field=None):
+ """
+ Set up and execute delete queries for all the objects in pk_list.
+
+ More than one physical query may be executed if there are a
+ lot of values in pk_list.
+ """
+ if not field:
+ field = self.get_meta().pk
+ for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
+ where = self.where_class()
+ where.add((Constraint(None, field.column, field), 'in',
+ pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND)
+ self.do_query(self.get_meta().db_table, where, using=using)
+
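+    # A minimal sketch of the batching above, assuming
+    # GET_ITERATOR_CHUNK_SIZE == 100 (its value in sql.constants): for 250
+    # pks, three DELETE statements are issued, each with a WHERE ... IN
+    # clause of at most 100 values.
+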
+ def delete_qs(self, query, using):
+ """
+ Delete the queryset in one SQL query (if possible). For simple queries
+        this is done by copying the query.query.where to self.query; for
+        complex queries, by using a subquery.
+ """
+ innerq = query.query
+ # Make sure the inner query has at least one table in use.
+ innerq.get_initial_alias()
+ # The same for our new query.
+ self.get_initial_alias()
+ innerq_used_tables = [t for t in innerq.tables
+ if innerq.alias_refcount[t]]
+ if ((not innerq_used_tables or innerq_used_tables == self.tables)
+ and not len(innerq.having)):
+            # There is only the base table in use in the query, and there is
+            # no aggregate filtering going on.
+ self.where = innerq.where
+ else:
+ pk = query.model._meta.pk
+ if not connections[using].features.update_can_self_select:
+ # We can't do the delete using subquery.
+ values = list(query.values_list('pk', flat=True))
+ if not values:
+ return
+ self.delete_batch(values, using)
+ return
+ else:
+ innerq.clear_select_clause()
+ innerq.select = [SelectInfo((self.get_initial_alias(), pk.column), None)]
+ values = innerq
+ where = self.where_class()
+ where.add((Constraint(None, pk.column, pk), 'in', values), AND)
+ self.where = where
+ self.get_compiler(using).execute_sql(None)
+
+
+class UpdateQuery(Query):
+ """
+ Represents an "update" SQL query.
+ """
+
+ compiler = 'SQLUpdateCompiler'
+
+ def __init__(self, *args, **kwargs):
+ super(UpdateQuery, self).__init__(*args, **kwargs)
+ self._setup_query()
+
+ def _setup_query(self):
+ """
+ Runs on initialization and after cloning. Any attributes that would
+ normally be set in __init__ should go in here, instead, so that they
+ are also set up after a clone() call.
+ """
+ self.values = []
+ self.related_ids = None
+ if not hasattr(self, 'related_updates'):
+ self.related_updates = {}
+
+ def clone(self, klass=None, **kwargs):
+ return super(UpdateQuery, self).clone(klass,
+ related_updates=self.related_updates.copy(), **kwargs)
+
+ def update_batch(self, pk_list, values, using):
+ pk_field = self.get_meta().pk
+ self.add_update_values(values)
+ for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
+ self.where = self.where_class()
+ self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
+ pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]),
+ AND)
+ self.get_compiler(using).execute_sql(None)
+
+ def add_update_values(self, values):
+ """
+ Convert a dictionary of field name to value mappings into an update
+ query. This is the entry point for the public update() method on
+ querysets.
+ """
+ values_seq = []
+ for name, val in six.iteritems(values):
+ field, model, direct, m2m = self.get_meta().get_field_by_name(name)
+ if not direct or m2m:
+ raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
+ if model:
+ self.add_related_update(model, field, val)
+ continue
+ values_seq.append((field, model, val))
+ return self.add_update_fields(values_seq)
+
+ def add_update_fields(self, values_seq):
+ """
+ Turn a sequence of (field, model, value) triples into an update query.
+ Used by add_update_values() as well as the "fast" update path when
+ saving models.
+ """
+        # Check that no Promise object is passed to the query. Refs #10498.
+ values_seq = [(value[0], value[1], force_text(value[2]))
+ if isinstance(value[2], Promise) else value
+ for value in values_seq]
+ self.values.extend(values_seq)
+
+ def add_related_update(self, model, field, value):
+ """
+ Adds (name, value) to an update query for an ancestor model.
+
+ Updates are coalesced so that we only run one update query per ancestor.
+ """
+ try:
+ self.related_updates[model].append((field, None, value))
+ except KeyError:
+ self.related_updates[model] = [(field, None, value)]
+
+ def get_related_updates(self):
+ """
+ Returns a list of query objects: one for each update required to an
+ ancestor model. Each query will have the same filtering conditions as
+ the current query but will only update a single table.
+ """
+ if not self.related_updates:
+ return []
+ result = []
+ for model, values in six.iteritems(self.related_updates):
+ query = UpdateQuery(model)
+ query.values = values
+ if self.related_ids is not None:
+ query.add_filter(('pk__in', self.related_ids))
+ result.append(query)
+ return result
+
+class InsertQuery(Query):
+ compiler = 'SQLInsertCompiler'
+
+ def __init__(self, *args, **kwargs):
+ super(InsertQuery, self).__init__(*args, **kwargs)
+ self.fields = []
+ self.objs = []
+
+ def clone(self, klass=None, **kwargs):
+ extras = {
+ 'fields': self.fields[:],
+ 'objs': self.objs[:],
+ 'raw': self.raw,
+ }
+ extras.update(kwargs)
+ return super(InsertQuery, self).clone(klass, **extras)
+
+ def insert_values(self, fields, objs, raw=False):
+ """
+        Set up the insert query from the 'fields' and 'objs' arguments: the
+        model fields to insert and the objects supplying their values.
+
+        If 'raw' is True, the values are inserted directly into the query,
+        rather than passed as SQL parameters. This provides a way to insert
+        NULL and DEFAULT keywords into the query, for example.
+ """
+ self.fields = fields
+ # Check that no Promise object reaches the DB. Refs #10498.
+ for field in fields:
+ for obj in objs:
+ value = getattr(obj, field.attname)
+ if isinstance(value, Promise):
+ setattr(obj, field.attname, force_text(value))
+ self.objs = objs
+ self.raw = raw
+
+class DateQuery(Query):
+ """
+ A DateQuery is a normal query, except that it specifically selects a single
+ date field. This requires some special handling when converting the results
+ back to Python objects, so we put it in a separate class.
+ """
+
+ compiler = 'SQLDateCompiler'
+
+ def add_select(self, field_name, lookup_type, order='ASC'):
+ """
+ Converts the query into an extraction query.
+ """
+ try:
+ result = self.setup_joins(
+ field_name.split(LOOKUP_SEP),
+ self.get_meta(),
+ self.get_initial_alias(),
+ )
+ except FieldError:
+ raise FieldDoesNotExist("%s has no field named '%s'" % (
+ self.get_meta().object_name, field_name
+ ))
+ field = result[0]
+ self._check_field(field) # overridden in DateTimeQuery
+ alias = result[3][-1]
+ select = self._get_select((alias, field.column), lookup_type)
+ self.clear_select_clause()
+ self.select = [SelectInfo(select, None)]
+ self.distinct = True
+ self.order_by = [1] if order == 'ASC' else [-1]
+
+ if field.null:
+ self.add_filter(("%s__isnull" % field_name, False))
+
+ def _check_field(self, field):
+ assert isinstance(field, DateField), \
+ "%r isn't a DateField." % field.name
+ if settings.USE_TZ:
+ assert not isinstance(field, DateTimeField), \
+ "%r is a DateTimeField, not a DateField." % field.name
+
+ def _get_select(self, col, lookup_type):
+ return Date(col, lookup_type)
+
+class DateTimeQuery(DateQuery):
+ """
+ A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
+ support is active, the tzinfo attribute contains the time zone to use for
+ converting the values before truncating them. Otherwise it's set to None.
+ """
+
+ compiler = 'SQLDateTimeCompiler'
+
+ def clone(self, klass=None, memo=None, **kwargs):
+ if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
+ kwargs['tzinfo'] = self.tzinfo
+ return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
+
+ def _check_field(self, field):
+ assert isinstance(field, DateTimeField), \
+ "%r isn't a DateTimeField." % field.name
+
+ def _get_select(self, col, lookup_type):
+ if self.tzinfo is None:
+ tzname = None
+ else:
+ tzname = timezone._get_timezone_name(self.tzinfo)
+ return DateTime(col, lookup_type, tzname)
+
+class AggregateQuery(Query):
+ """
+ An AggregateQuery takes another query as a parameter to the FROM
+ clause and only selects the elements in the provided list.
+ """
+
+ compiler = 'SQLAggregateCompiler'
+
+ def add_subquery(self, query, using):
+ self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
diff --git a/lib/python2.7/site-packages/django/db/models/sql/where.py b/lib/python2.7/site-packages/django/db/models/sql/where.py
new file mode 100644
index 0000000..2a342d4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/where.py
@@ -0,0 +1,419 @@
+"""
+Code to manage the creation and SQL rendering of 'where' constraints.
+"""
+
+from __future__ import absolute_import
+
+import datetime
+from itertools import repeat
+
+from django.conf import settings
+from django.db.models.fields import DateTimeField, Field
+from django.db.models.sql.datastructures import EmptyResultSet, Empty
+from django.db.models.sql.aggregates import Aggregate
+from django.utils.itercompat import is_iterator
+from django.utils.six.moves import xrange
+from django.utils import timezone
+from django.utils import tree
+
+# Connection types
+AND = 'AND'
+OR = 'OR'
+
+class EmptyShortCircuit(Exception):
+ """
+ Internal exception used to indicate that a "matches nothing" node should be
+ added to the where-clause.
+ """
+ pass
+
+class WhereNode(tree.Node):
+ """
+ Used to represent the SQL where-clause.
+
+ The class is tied to the Query class that created it (in order to create
+ the correct SQL).
+
+ A child is usually a tuple of:
+ (Constraint(alias, targetcol, field), lookup_type, value)
+    where value can be either a raw Python value, or a Query, ExpressionNode,
+    or something else that knows how to turn itself into SQL.
+
+    However, a child could also be any class with an as_sql() method and
+    either a relabeled_clone() method, or relabel_aliases() and clone()
+    methods. The second alternative should be used if the alias is not the
+    only mutable variable.
+ """
+ default = AND
+
+ def _prepare_data(self, data):
+ """
+ Prepare data for addition to the tree. If the data is a list or tuple,
+ it is expected to be of the form (obj, lookup_type, value), where obj
+ is a Constraint object, and is then slightly munged before being
+ stored (to avoid storing any reference to field objects). Otherwise,
+ the 'data' is stored unchanged and can be any class with an 'as_sql()'
+ method.
+ """
+ if not isinstance(data, (list, tuple)):
+ return data
+ obj, lookup_type, value = data
+ if is_iterator(value):
+ # Consume any generators immediately, so that we can determine
+ # emptiness and transform any non-empty values correctly.
+ value = list(value)
+
+        # The "value_annotation" parameter is used to pass auxiliary information
+ # about the value(s) to the query construction. Specifically, datetime
+ # and empty values need special handling. Other types could be used
+ # here in the future (using Python types is suggested for consistency).
+ if (isinstance(value, datetime.datetime)
+ or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')):
+ value_annotation = datetime.datetime
+ elif hasattr(value, 'value_annotation'):
+ value_annotation = value.value_annotation
+ else:
+ value_annotation = bool(value)
+
+ if hasattr(obj, "prepare"):
+ value = obj.prepare(lookup_type, value)
+ return (obj, lookup_type, value_annotation, value)
+
+ def as_sql(self, qn, connection):
+ """
+ Returns the SQL version of the where clause and the value to be
+ substituted in. Returns '', [] if this node matches everything,
+ None, [] if this node is empty, and raises EmptyResultSet if this
+ node can't match anything.
+ """
+ # Note that the logic here is made slightly more complex than
+ # necessary because there are two kind of empty nodes: Nodes
+ # containing 0 children, and nodes that are known to match everything.
+ # A match-everything node is different than empty node (which also
+ # technically matches everything) for backwards compatibility reasons.
+ # Refs #5261.
+ result = []
+ result_params = []
+ everything_childs, nothing_childs = 0, 0
+ non_empty_childs = len(self.children)
+
+ for child in self.children:
+ try:
+ if hasattr(child, 'as_sql'):
+ sql, params = child.as_sql(qn=qn, connection=connection)
+ else:
+ # A leaf node in the tree.
+ sql, params = self.make_atom(child, qn, connection)
+ except EmptyResultSet:
+ nothing_childs += 1
+ else:
+ if sql:
+ result.append(sql)
+ result_params.extend(params)
+ else:
+ if sql is None:
+                        # Skip empty children entirely.
+ non_empty_childs -= 1
+ continue
+ everything_childs += 1
+ # Check if this node matches nothing or everything.
+        # First compute how many full and empty child nodes are needed
+        # to make this node empty/full.
+ if self.connector == AND:
+ full_needed, empty_needed = non_empty_childs, 1
+ else:
+ full_needed, empty_needed = 1, non_empty_childs
+ # Now, check if this node is full/empty using the
+ # counts.
+ if empty_needed - nothing_childs <= 0:
+ if self.negated:
+ return '', []
+ else:
+ raise EmptyResultSet
+ if full_needed - everything_childs <= 0:
+ if self.negated:
+ raise EmptyResultSet
+ else:
+ return '', []
+
+ if non_empty_childs == 0:
+ # All the child nodes were empty, so this one is empty, too.
+ return None, []
+ conn = ' %s ' % self.connector
+ sql_string = conn.join(result)
+ if sql_string:
+ if self.negated:
+ # Some backends (Oracle at least) need parentheses
+ # around the inner SQL in the negated case, even if the
+ # inner SQL contains just a single expression.
+ sql_string = 'NOT (%s)' % sql_string
+ elif len(result) > 1:
+ sql_string = '(%s)' % sql_string
+ return sql_string, result_params
+
+ def get_cols(self):
+ cols = []
+ for child in self.children:
+ if hasattr(child, 'get_cols'):
+ cols.extend(child.get_cols())
+ else:
+ if isinstance(child[0], Constraint):
+ cols.append((child[0].alias, child[0].col))
+ if hasattr(child[3], 'get_cols'):
+ cols.extend(child[3].get_cols())
+ return cols
+
+ def make_atom(self, child, qn, connection):
+ """
+ Turn a tuple (Constraint(table_alias, column_name, db_type),
+ lookup_type, value_annotation, params) into valid SQL.
+
+ The first item of the tuple may also be an Aggregate.
+
+ Returns the string for the SQL fragment and the parameters to use for
+ it.
+ """
+ lvalue, lookup_type, value_annotation, params_or_value = child
+ field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None
+
+ if isinstance(lvalue, Constraint):
+ try:
+ lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
+ except EmptyShortCircuit:
+ raise EmptyResultSet
+ elif isinstance(lvalue, Aggregate):
+ params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
+ else:
+ raise TypeError("'make_atom' expects a Constraint or an Aggregate "
+ "as the first item of its 'child' argument.")
+
+ if isinstance(lvalue, tuple):
+ # A direct database column lookup.
+ field_sql, field_params = self.sql_for_columns(lvalue, qn, connection, field_internal_type), []
+ else:
+ # A smart object with an as_sql() method.
+ field_sql, field_params = lvalue.as_sql(qn, connection)
+
+ is_datetime_field = value_annotation is datetime.datetime
+ cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s'
+
+ if hasattr(params, 'as_sql'):
+ extra, params = params.as_sql(qn, connection)
+ cast_sql = ''
+ else:
+ extra = ''
+
+ params = field_params + params
+
+ if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
+ and connection.features.interprets_empty_strings_as_nulls):
+ lookup_type = 'isnull'
+ value_annotation = True
+
+ if lookup_type in connection.operators:
+ format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),)
+ return (format % (field_sql,
+ connection.operators[lookup_type] % cast_sql,
+ extra), params)
+
+ if lookup_type == 'in':
+ if not value_annotation:
+ raise EmptyResultSet
+ if extra:
+ return ('%s IN %s' % (field_sql, extra), params)
+ max_in_list_size = connection.ops.max_in_list_size()
+ if max_in_list_size and len(params) > max_in_list_size:
+ # Break up the params list into an OR of manageable chunks.
+ in_clause_elements = ['(']
+ for offset in xrange(0, len(params), max_in_list_size):
+ if offset > 0:
+ in_clause_elements.append(' OR ')
+ in_clause_elements.append('%s IN (' % field_sql)
+ group_size = min(len(params) - offset, max_in_list_size)
+ param_group = ', '.join(repeat('%s', group_size))
+ in_clause_elements.append(param_group)
+ in_clause_elements.append(')')
+ in_clause_elements.append(')')
+ return ''.join(in_clause_elements), params
+ else:
+ return ('%s IN (%s)' % (field_sql,
+ ', '.join(repeat('%s', len(params)))),
+ params)
+ elif lookup_type in ('range', 'year'):
+ return ('%s BETWEEN %%s and %%s' % field_sql, params)
+ elif is_datetime_field and lookup_type in ('month', 'day', 'week_day',
+ 'hour', 'minute', 'second'):
+ tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
+ sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname)
+ return ('%s = %%s' % sql, tz_params + params)
+ elif lookup_type in ('month', 'day', 'week_day'):
+ return ('%s = %%s'
+ % connection.ops.date_extract_sql(lookup_type, field_sql), params)
+ elif lookup_type == 'isnull':
+ assert value_annotation in (True, False), "Invalid value_annotation for isnull"
+ return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ())
+ elif lookup_type == 'search':
+ return (connection.ops.fulltext_search_sql(field_sql), params)
+ elif lookup_type in ('regex', 'iregex'):
+ return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
+
+ raise TypeError('Invalid lookup_type: %r' % lookup_type)
+
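+    # A minimal sketch of the 'in' chunking above, assuming a backend whose
+    # max_in_list_size() returns 3, field_sql == 'T1."id"' and five params;
+    # the rendered fragment is
+    #
+    #     (T1."id" IN (%s, %s, %s) OR T1."id" IN (%s, %s))
+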
+ def sql_for_columns(self, data, qn, connection, internal_type=None):
+ """
+ Returns the SQL fragment used for the left-hand side of a column
+ constraint (for example, the "T1.foo" portion in the clause
+ "WHERE ... T1.foo = 6") and a list of parameters.
+ """
+ table_alias, name, db_type = data
+ if table_alias:
+ lhs = '%s.%s' % (qn(table_alias), qn(name))
+ else:
+ lhs = qn(name)
+ return connection.ops.field_cast_sql(db_type, internal_type) % lhs
+
+ def relabel_aliases(self, change_map):
+ """
+ Relabels the alias values of any children. 'change_map' is a dictionary
+ mapping old (current) alias values to the new values.
+ """
+ for pos, child in enumerate(self.children):
+ if hasattr(child, 'relabel_aliases'):
+ # For example another WhereNode
+ child.relabel_aliases(change_map)
+ elif isinstance(child, (list, tuple)):
+ # tuple starting with Constraint
+ child = (child[0].relabeled_clone(change_map),) + child[1:]
+ if hasattr(child[3], 'relabeled_clone'):
+ child = (child[0], child[1], child[2]) + (
+ child[3].relabeled_clone(change_map),)
+ self.children[pos] = child
+
+ def clone(self):
+ """
+ Creates a clone of the tree. Must only be called on root nodes (nodes
+        with empty subtree_parents). Children must be either (Constraint,
+        lookup, value) tuples, or objects supporting .clone().
+ """
+ clone = self.__class__._new_instance(
+ children=[], connector=self.connector, negated=self.negated)
+ for child in self.children:
+ if hasattr(child, 'clone'):
+ clone.children.append(child.clone())
+ else:
+ clone.children.append(child)
+ return clone
+
+class EmptyWhere(WhereNode):
+
+ def add(self, data, connector):
+ return
+
+ def as_sql(self, qn=None, connection=None):
+ raise EmptyResultSet
+
+class EverythingNode(object):
+ """
+ A node that matches everything.
+ """
+
+ def as_sql(self, qn=None, connection=None):
+ return '', []
+
+
+class NothingNode(object):
+ """
+ A node that matches nothing.
+ """
+ def as_sql(self, qn=None, connection=None):
+ raise EmptyResultSet
+
+
+class ExtraWhere(object):
+ def __init__(self, sqls, params):
+ self.sqls = sqls
+ self.params = params
+
+ def as_sql(self, qn=None, connection=None):
+ sqls = ["(%s)" % sql for sql in self.sqls]
+ return " AND ".join(sqls), list(self.params or ())
+
+
+class Constraint(object):
+ """
+ An object that can be passed to WhereNode.add() and knows how to
+ pre-process itself prior to including in the WhereNode.
+ """
+ def __init__(self, alias, col, field):
+ self.alias, self.col, self.field = alias, col, field
+
+ def prepare(self, lookup_type, value):
+ if self.field:
+ return self.field.get_prep_lookup(lookup_type, value)
+ return value
+
+ def process(self, lookup_type, value, connection):
+ """
+ Returns a tuple of data suitable for inclusion in a WhereNode
+ instance.
+ """
+ # Because of circular imports, we need to import this here.
+ from django.db.models.base import ObjectDoesNotExist
+ try:
+ if self.field:
+ params = self.field.get_db_prep_lookup(lookup_type, value,
+ connection=connection, prepared=True)
+ db_type = self.field.db_type(connection=connection)
+ else:
+ # This branch is used at times when we add a comparison to NULL
+ # (we don't really want to waste time looking up the associated
+ # field object at the calling location).
+ params = Field().get_db_prep_lookup(lookup_type, value,
+ connection=connection, prepared=True)
+ db_type = None
+ except ObjectDoesNotExist:
+ raise EmptyShortCircuit
+
+ return (self.alias, self.col, db_type), params
+
+ def relabeled_clone(self, change_map):
+ if self.alias not in change_map:
+ return self
+ else:
+ new = Empty()
+ new.__class__ = self.__class__
+ new.alias, new.col, new.field = change_map[self.alias], self.col, self.field
+ return new
+
+class SubqueryConstraint(object):
+ def __init__(self, alias, columns, targets, query_object):
+ self.alias = alias
+ self.columns = columns
+ self.targets = targets
+ self.query_object = query_object
+
+ def as_sql(self, qn, connection):
+ query = self.query_object
+
+ # QuerySet was sent
+ if hasattr(query, 'values'):
+ if query._db and connection.alias != query._db:
+ raise ValueError("Can't do subqueries with queries on different DBs.")
+ # Do not override already existing values.
+ if not hasattr(query, 'field_names'):
+ query = query.values(*self.targets)
+ else:
+ query = query._clone()
+ query = query.query
+ query.clear_ordering(True)
+
+ query_compiler = query.get_compiler(connection=connection)
+ return query_compiler.as_subquery_condition(self.alias, self.columns, qn)
+
+ def relabel_aliases(self, change_map):
+ self.alias = change_map.get(self.alias, self.alias)
+
+ def clone(self):
+ return self.__class__(
+ self.alias, self.columns, self.targets,
+ self.query_object)
diff --git a/lib/python2.7/site-packages/django/db/transaction.py b/lib/python2.7/site-packages/django/db/transaction.py
new file mode 100644
index 0000000..f75137a
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/transaction.py
@@ -0,0 +1,541 @@
+"""
+This module implements a transaction manager that can be used to define
+transaction handling in a request or view function. It is used by transaction
+control middleware and decorators.
+
+The transaction manager can be in managed or in auto state. Auto state means the
+system is using a commit-on-save strategy (actually it's more like
+commit-on-change). As soon as the .save() or .delete() (or related) methods are
+called, a commit is made.
+
+Managed transactions don't do those commits, but will need some kind of manual
+or implicit commits or rollbacks.
+"""
+
+import warnings
+
+from functools import wraps
+
+from django.db import (
+ connections, DEFAULT_DB_ALIAS,
+ DatabaseError, Error, ProgrammingError)
+from django.utils.decorators import available_attrs
+
+
+class TransactionManagementError(ProgrammingError):
+ """
+ This exception is thrown when transaction management is used improperly.
+ """
+ pass
+
+################
+# Private APIs #
+################
+
+def get_connection(using=None):
+ """
+ Get a database connection by name, or the default database connection
+ if no name is provided.
+ """
+ if using is None:
+ using = DEFAULT_DB_ALIAS
+ return connections[using]
+
+###########################
+# Deprecated private APIs #
+###########################
+
+def abort(using=None):
+ """
+ Roll back any ongoing transactions and clean the transaction management
+ state of the connection.
+
+ This method is to be used only in cases where using balanced
+ leave_transaction_management() calls isn't possible. For example after a
+ request has finished, the transaction state isn't known, yet the connection
+ must be cleaned up for the next request.
+ """
+ get_connection(using).abort()
+
+def enter_transaction_management(managed=True, using=None, forced=False):
+ """
+ Enters transaction management for a running thread. It must be balanced with
+ the appropriate leave_transaction_management call, since the actual state is
+ managed as a stack.
+
+ The state and dirty flag are carried over from the surrounding block or
+ from the settings, if there is no surrounding block (dirty is always false
+ when no current block is running).
+ """
+ get_connection(using).enter_transaction_management(managed, forced)
+
+def leave_transaction_management(using=None):
+ """
+ Leaves transaction management for a running thread. A dirty flag is carried
+ over to the surrounding block, as a commit will commit all changes, even
+ those from outside. (Commits are on connection level.)
+ """
+ get_connection(using).leave_transaction_management()
+
+def is_dirty(using=None):
+ """
+ Returns True if the current transaction requires a commit for changes to
+ happen.
+ """
+ return get_connection(using).is_dirty()
+
+def set_dirty(using=None):
+ """
+ Sets a dirty flag for the current thread and code streak. This can be used
+    in a managed block of code to decide whether there are open changes
+    waiting for commit.
+ """
+ get_connection(using).set_dirty()
+
+def set_clean(using=None):
+ """
+ Resets a dirty flag for the current thread and code streak. This can be used
+    in a managed block of code to decide whether a commit or rollback
+    should happen.
+ """
+ get_connection(using).set_clean()
+
+def is_managed(using=None):
+ warnings.warn("'is_managed' is deprecated.",
+ PendingDeprecationWarning, stacklevel=2)
+
+def managed(flag=True, using=None):
+ warnings.warn("'managed' no longer serves a purpose.",
+ PendingDeprecationWarning, stacklevel=2)
+
+def commit_unless_managed(using=None):
+ warnings.warn("'commit_unless_managed' is now a no-op.",
+ PendingDeprecationWarning, stacklevel=2)
+
+def rollback_unless_managed(using=None):
+ warnings.warn("'rollback_unless_managed' is now a no-op.",
+ PendingDeprecationWarning, stacklevel=2)
+
+###############
+# Public APIs #
+###############
+
+def get_autocommit(using=None):
+ """
+ Get the autocommit status of the connection.
+ """
+ return get_connection(using).get_autocommit()
+
+def set_autocommit(autocommit, using=None):
+ """
+ Set the autocommit status of the connection.
+ """
+ return get_connection(using).set_autocommit(autocommit)
+
+def commit(using=None):
+ """
+ Commits a transaction and resets the dirty flag.
+ """
+ get_connection(using).commit()
+
+def rollback(using=None):
+ """
+ Rolls back a transaction and resets the dirty flag.
+ """
+ get_connection(using).rollback()
+
+def savepoint(using=None):
+ """
+ Creates a savepoint (if supported and required by the backend) inside the
+ current transaction. Returns an identifier for the savepoint that will be
+ used for the subsequent rollback or commit.
+ """
+ return get_connection(using).savepoint()
+
+def savepoint_rollback(sid, using=None):
+ """
+ Rolls back the most recent savepoint (if one exists). Does nothing if
+ savepoints are not supported.
+ """
+ get_connection(using).savepoint_rollback(sid)
+
+def savepoint_commit(sid, using=None):
+ """
+ Commits the most recent savepoint (if one exists). Does nothing if
+ savepoints are not supported.
+ """
+ get_connection(using).savepoint_commit(sid)
+
+def clean_savepoints(using=None):
+ """
+ Resets the counter used to generate unique savepoint ids in this thread.
+ """
+ get_connection(using).clean_savepoints()
+
+def get_rollback(using=None):
+ """
+ Gets the "needs rollback" flag -- for *advanced use* only.
+ """
+ return get_connection(using).get_rollback()
+
+def set_rollback(rollback, using=None):
+ """
+ Sets or unsets the "needs rollback" flag -- for *advanced use* only.
+
+ When `rollback` is `True`, it triggers a rollback when exiting the
+ innermost enclosing atomic block that has `savepoint=True` (that's the
+ default). Use this to force a rollback without raising an exception.
+
+ When `rollback` is `False`, it prevents such a rollback. Use this only
+ after rolling back to a known-good state! Otherwise, you break the atomic
+ block and data corruption may occur.
+ """
+ return get_connection(using).set_rollback(rollback)
+
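+# A minimal usage sketch of set_rollback(); the writes and the validity
+# check are hypothetical:
+#
+#     with atomic():
+#         do_writes()
+#         if not results_look_valid():
+#             set_rollback(True)   # roll back on exit, without raising
+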
+#################################
+# Decorators / context managers #
+#################################
+
+class Atomic(object):
+ """
+ This class guarantees the atomic execution of a given block.
+
+ An instance can be used either as a decorator or as a context manager.
+
+ When it's used as a decorator, __call__ wraps the execution of the
+ decorated function in the instance itself, used as a context manager.
+
+ When it's used as a context manager, __enter__ creates a transaction or a
+ savepoint, depending on whether a transaction is already in progress, and
+ __exit__ commits the transaction or releases the savepoint on normal exit,
+ and rolls back the transaction or to the savepoint on exceptions.
+
+ It's possible to disable the creation of savepoints if the goal is to
+ ensure that some code runs within a transaction without creating overhead.
+
+    A stack of savepoint identifiers is maintained as an attribute of the
+    connection. None denotes the absence of a savepoint.
+
+    This allows reentrancy even if the same Atomic instance is reused. For
+    example, it's possible to define `oa = atomic('other')` and use `@oa` or
+    `with oa:` multiple times.
+
+ Since database connections are thread-local, this is thread-safe.
+ """
+
+ def __init__(self, using, savepoint):
+ self.using = using
+ self.savepoint = savepoint
+
+ def __enter__(self):
+ connection = get_connection(self.using)
+
+ if not connection.in_atomic_block:
+ # Reset state when entering an outermost atomic block.
+ connection.commit_on_exit = True
+ connection.needs_rollback = False
+ if not connection.get_autocommit():
+ # Some database adapters (namely sqlite3) don't handle
+ # transactions and savepoints properly when autocommit is off.
+ # Turning autocommit back on isn't an option; it would trigger
+ # a premature commit. Give up if that happens.
+ if connection.features.autocommits_when_autocommit_is_off:
+ raise TransactionManagementError(
+ "Your database backend doesn't behave properly when "
+ "autocommit is off. Turn it on before using 'atomic'.")
+ # When entering an atomic block with autocommit turned off,
+ # Django should only use savepoints and shouldn't commit.
+ # This requires at least a savepoint for the outermost block.
+ if not self.savepoint:
+ raise TransactionManagementError(
+ "The outermost 'atomic' block cannot use "
+ "savepoint = False when autocommit is off.")
+ # Pretend we're already in an atomic block to bypass the code
+ # that disables autocommit to enter a transaction, and make a
+ # note to deal with this case in __exit__.
+ connection.in_atomic_block = True
+ connection.commit_on_exit = False
+
+ if connection.in_atomic_block:
+ # We're already in a transaction; create a savepoint, unless we
+ # were told not to or we're already waiting for a rollback. The
+ # second condition avoids creating useless savepoints and prevents
+ # overwriting needs_rollback until the rollback is performed.
+ if self.savepoint and not connection.needs_rollback:
+ sid = connection.savepoint()
+ connection.savepoint_ids.append(sid)
+ else:
+ connection.savepoint_ids.append(None)
+ else:
+ # We aren't in a transaction yet; create one.
+ # The usual way to start a transaction is to turn autocommit off.
+ # However, some database adapters (namely sqlite3) don't handle
+ # transactions and savepoints properly when autocommit is off.
+ # In such cases, start an explicit transaction instead, which has
+ # the side-effect of disabling autocommit.
+ if connection.features.autocommits_when_autocommit_is_off:
+ connection._start_transaction_under_autocommit()
+ connection.autocommit = False
+ else:
+ connection.set_autocommit(False)
+ connection.in_atomic_block = True
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ connection = get_connection(self.using)
+
+ if connection.savepoint_ids:
+ sid = connection.savepoint_ids.pop()
+ else:
+ # Prematurely unset this flag to allow using commit or rollback.
+ connection.in_atomic_block = False
+
+ try:
+ if connection.closed_in_transaction:
+ # The database will perform a rollback by itself.
+ # Wait until we exit the outermost block.
+ pass
+
+ elif exc_type is None and not connection.needs_rollback:
+ if connection.in_atomic_block:
+ # Release savepoint if there is one
+ if sid is not None:
+ try:
+ connection.savepoint_commit(sid)
+ except DatabaseError:
+ try:
+ connection.savepoint_rollback(sid)
+ except Error:
+ # If rolling back to a savepoint fails, mark for
+ # rollback at a higher level and avoid shadowing
+ # the original exception.
+ connection.needs_rollback = True
+ raise
+ else:
+ # Commit transaction
+ try:
+ connection.commit()
+ except DatabaseError:
+ try:
+ connection.rollback()
+ except Error:
+ # An error during rollback means that something
+ # went wrong with the connection. Drop it.
+ connection.close()
+ raise
+ else:
+ # This flag will be set to True again if there isn't a savepoint
+                # allowing us to perform the rollback at this level.
+ connection.needs_rollback = False
+ if connection.in_atomic_block:
+ # Roll back to savepoint if there is one, mark for rollback
+ # otherwise.
+ if sid is None:
+ connection.needs_rollback = True
+ else:
+ try:
+ connection.savepoint_rollback(sid)
+ except Error:
+ # If rolling back to a savepoint fails, mark for
+ # rollback at a higher level and avoid shadowing
+ # the original exception.
+ connection.needs_rollback = True
+ else:
+ # Roll back transaction
+ try:
+ connection.rollback()
+ except Error:
+ # An error during rollback means that something
+ # went wrong with the connection. Drop it.
+ connection.close()
+
+ finally:
+ # Outermost block exit when autocommit was enabled.
+ if not connection.in_atomic_block:
+ if connection.closed_in_transaction:
+ connection.connection = None
+ elif connection.features.autocommits_when_autocommit_is_off:
+ connection.autocommit = True
+ else:
+ connection.set_autocommit(True)
+ # Outermost block exit when autocommit was disabled.
+ elif not connection.savepoint_ids and not connection.commit_on_exit:
+ if connection.closed_in_transaction:
+ connection.connection = None
+ else:
+ connection.in_atomic_block = False
+
+ def __call__(self, func):
+ @wraps(func, assigned=available_attrs(func))
+ def inner(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+ return inner
+
+
+def atomic(using=None, savepoint=True):
+ # Bare decorator: @atomic -- although the first argument is called
+ # `using`, it's actually the function being decorated.
+ if callable(using):
+ return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
+ # Decorator: @atomic(...) or context manager: with atomic(...): ...
+ else:
+ return Atomic(using, savepoint)
+
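+# A minimal sketch of the supported spellings ('other' is a hypothetical
+# database alias):
+#
+#     @atomic                     # bare decorator, default connection
+#     def f(): ...
+#
+#     @atomic(using='other', savepoint=False)
+#     def g(): ...
+#
+#     with atomic():              # context manager
+#         do_writes()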
+
+def _non_atomic_requests(view, using):
+ try:
+ view._non_atomic_requests.add(using)
+ except AttributeError:
+ view._non_atomic_requests = set([using])
+ return view
+
+
+def non_atomic_requests(using=None):
+ if callable(using):
+ return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
+ else:
+ if using is None:
+ using = DEFAULT_DB_ALIAS
+ return lambda view: _non_atomic_requests(view, using)
+
+
+############################################
+# Deprecated decorators / context managers #
+############################################
+
+class Transaction(object):
+ """
+ Acts as either a decorator, or a context manager. If it's a decorator it
+ takes a function and returns a wrapped function. If it's a contextmanager
+ it's used with the ``with`` statement. In either event entering/exiting
+ are called before and after, respectively, the function/block is executed.
+
+ autocommit, commit_on_success, and commit_manually contain the
+ implementations of entering and exiting.
+ """
+ def __init__(self, entering, exiting, using):
+ self.entering = entering
+ self.exiting = exiting
+ self.using = using
+
+ def __enter__(self):
+ self.entering(self.using)
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.exiting(exc_type, self.using)
+
+ def __call__(self, func):
+ @wraps(func)
+ def inner(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+ return inner
+
+def _transaction_func(entering, exiting, using):
+ """
+    Takes three things: an entering function (what to do to start this block
+    of transaction management), an exiting function (what to do to end it, on
+    both success and failure), and 'using', which can be None (indicating
+    DEFAULT_DB_ALIAS), a string naming a database alias, or a callable
+    (indicating DEFAULT_DB_ALIAS, and that the function should be returned
+    already wrapped).
+
+    Returns either a Transaction object, which is both a decorator and a
+    context manager, or a wrapped function, if using is a callable.
+ """
+ # Note that although the first argument is *called* `using`, it
+ # may actually be a function; @autocommit and @autocommit('foo')
+ # are both allowed forms.
+ if using is None:
+ using = DEFAULT_DB_ALIAS
+ if callable(using):
+ return Transaction(entering, exiting, DEFAULT_DB_ALIAS)(using)
+ return Transaction(entering, exiting, using)
+
+
+def autocommit(using=None):
+ """
+ Decorator that activates commit on save. This is Django's default behavior;
+ this decorator is useful if you globally activated transaction management in
+ your settings file and want the default behavior in some view functions.
+ """
+ warnings.warn("autocommit is deprecated in favor of set_autocommit.",
+ PendingDeprecationWarning, stacklevel=2)
+
+ def entering(using):
+ enter_transaction_management(managed=False, using=using)
+
+ def exiting(exc_type, using):
+ leave_transaction_management(using=using)
+
+ return _transaction_func(entering, exiting, using)
+
+def commit_on_success(using=None):
+ """
+ This decorator activates commit on response. This way, if the view function
+    runs successfully, a commit is made; if the view function produces an
+    exception, a rollback is made. This is one of the most common ways to do
+    transaction control in Web apps.
+ """
+ warnings.warn("commit_on_success is deprecated in favor of atomic.",
+ PendingDeprecationWarning, stacklevel=2)
+
+ def entering(using):
+ enter_transaction_management(using=using)
+
+ def exiting(exc_type, using):
+ try:
+ if exc_type is not None:
+ if is_dirty(using=using):
+ rollback(using=using)
+ else:
+ if is_dirty(using=using):
+ try:
+ commit(using=using)
+ except:
+ rollback(using=using)
+ raise
+ finally:
+ leave_transaction_management(using=using)
+
+ return _transaction_func(entering, exiting, using)
+
+def commit_manually(using=None):
+ """
+ Decorator that activates manual transaction control. It just disables
+ automatic transaction control and doesn't do any commit/rollback of its
+ own -- it's up to the user to call the commit and rollback functions
+ themselves.
+ """
+ warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
+ PendingDeprecationWarning, stacklevel=2)
+
+ def entering(using):
+ enter_transaction_management(using=using)
+
+ def exiting(exc_type, using):
+ leave_transaction_management(using=using)
+
+ return _transaction_func(entering, exiting, using)
+
+def commit_on_success_unless_managed(using=None, savepoint=False):
+ """
+ Transitory API to preserve backwards-compatibility while refactoring.
+
+ Once the legacy transaction management is fully deprecated, this should
+ simply be replaced by atomic. Until then, it's necessary to guarantee that
+ a commit occurs on exit, which atomic doesn't do when it's nested.
+
+ Unlike atomic, savepoint defaults to False because that's closer to the
+ legacy behavior.
+ """
+ connection = get_connection(using)
+ if connection.get_autocommit() or connection.in_atomic_block:
+ return atomic(using, savepoint)
+ else:
+ def entering(using):
+ pass
+
+ def exiting(exc_type, using):
+ set_dirty(using=using)
+
+ return _transaction_func(entering, exiting, using)
diff --git a/lib/python2.7/site-packages/django/db/utils.py b/lib/python2.7/site-packages/django/db/utils.py
new file mode 100644
index 0000000..eb5708c
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/utils.py
@@ -0,0 +1,282 @@
+from functools import wraps
+import os
+import pkgutil
+from threading import local
+import warnings
+
+from django.conf import settings
+from django.core.exceptions import ImproperlyConfigured
+from django.utils.functional import cached_property
+from django.utils.importlib import import_module
+from django.utils.module_loading import import_by_path
+from django.utils._os import upath
+from django.utils import six
+
+
+DEFAULT_DB_ALIAS = 'default'
+
+
+class Error(Exception if six.PY3 else StandardError):
+ pass
+
+
+class InterfaceError(Error):
+ pass
+
+
+class DatabaseError(Error):
+ pass
+
+
+class DataError(DatabaseError):
+ pass
+
+
+class OperationalError(DatabaseError):
+ pass
+
+
+class IntegrityError(DatabaseError):
+ pass
+
+
+class InternalError(DatabaseError):
+ pass
+
+
+class ProgrammingError(DatabaseError):
+ pass
+
+
+class NotSupportedError(DatabaseError):
+ pass
+
+
+class DatabaseErrorWrapper(object):
+ """
+ Context manager and decorator that re-throws backend-specific database
+ exceptions using Django's common wrappers.
+ """
+
+ def __init__(self, wrapper):
+ """
+ wrapper is a database wrapper.
+
+ It must have a Database attribute defining PEP-249 exceptions.
+ """
+ self.wrapper = wrapper
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type is None:
+ return
+ for dj_exc_type in (
+ DataError,
+ OperationalError,
+ IntegrityError,
+ InternalError,
+ ProgrammingError,
+ NotSupportedError,
+ DatabaseError,
+ InterfaceError,
+ Error,
+ ):
+ db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
+ if issubclass(exc_type, db_exc_type):
+ # Under Python 2.6, exc_value can still be a string.
+ try:
+ args = tuple(exc_value.args)
+ except AttributeError:
+ args = (exc_value,)
+ dj_exc_value = dj_exc_type(*args)
+ dj_exc_value.__cause__ = exc_value
+ # Only set the 'errors_occurred' flag for errors that may make
+ # the connection unusable.
+ if dj_exc_type not in (DataError, IntegrityError):
+ self.wrapper.errors_occurred = True
+ six.reraise(dj_exc_type, dj_exc_value, traceback)
+
+ def __call__(self, func):
+ # Note that we are intentionally not using @wraps here for performance
+ # reasons. Refs #21109.
+ def inner(*args, **kwargs):
+ with self:
+ return func(*args, **kwargs)
+ return inner
+
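+# A minimal usage sketch, assuming 'wrapper' is a DatabaseWrapper whose
+# Database attribute exposes the PEP-249 exception classes:
+#
+#     with DatabaseErrorWrapper(wrapper):
+#         cursor.execute(sql)
+#
+# A backend-specific IntegrityError raised inside the block is re-raised
+# as django.db.utils.IntegrityError, with __cause__ set to the original.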
+
+def load_backend(backend_name):
+ # Look for a fully qualified database backend name
+ try:
+ return import_module('%s.base' % backend_name)
+ except ImportError as e_user:
+ # The database backend wasn't found. Display a helpful error message
+ # listing all possible (built-in) database backends.
+ backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
+ try:
+ builtin_backends = [
+ name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
+ if ispkg and name != 'dummy']
+ except EnvironmentError:
+ builtin_backends = []
+ if backend_name not in ['django.db.backends.%s' % b for b in
+ builtin_backends]:
+ backend_reprs = map(repr, sorted(builtin_backends))
+ error_msg = ("%r isn't an available database backend.\n"
+ "Try using 'django.db.backends.XXX', where XXX "
+ "is one of:\n %s\nError was: %s" %
+ (backend_name, ", ".join(backend_reprs), e_user))
+ raise ImproperlyConfigured(error_msg)
+ else:
+ # If there's some other error, this must be an error in Django
+ raise
+
+
+class ConnectionDoesNotExist(Exception):
+ pass
+
+
+class ConnectionHandler(object):
+ def __init__(self, databases=None):
+ """
+ databases is an optional dictionary of database definitions (structured
+ like settings.DATABASES).
+ """
+ self._databases = databases
+ self._connections = local()
+
+ @cached_property
+ def databases(self):
+ if self._databases is None:
+ self._databases = settings.DATABASES
+ if self._databases == {}:
+ self._databases = {
+ DEFAULT_DB_ALIAS: {
+ 'ENGINE': 'django.db.backends.dummy',
+ },
+ }
+ if DEFAULT_DB_ALIAS not in self._databases:
+ raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
+ return self._databases
+
+ def ensure_defaults(self, alias):
+ """
+ Puts the defaults into the settings dictionary for a given connection
+        where no settings are provided.
+ """
+ try:
+ conn = self.databases[alias]
+ except KeyError:
+ raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
+
+ conn.setdefault('ATOMIC_REQUESTS', False)
+ if settings.TRANSACTIONS_MANAGED:
+ warnings.warn(
+ "TRANSACTIONS_MANAGED is deprecated. Use AUTOCOMMIT instead.",
+ PendingDeprecationWarning, stacklevel=2)
+ conn.setdefault('AUTOCOMMIT', False)
+ conn.setdefault('AUTOCOMMIT', True)
+ conn.setdefault('ENGINE', 'django.db.backends.dummy')
+ if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
+ conn['ENGINE'] = 'django.db.backends.dummy'
+ conn.setdefault('CONN_MAX_AGE', 0)
+ conn.setdefault('OPTIONS', {})
+ conn.setdefault('TIME_ZONE', 'UTC' if settings.USE_TZ else settings.TIME_ZONE)
+ for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
+ conn.setdefault(setting, '')
+ for setting in ['TEST_CHARSET', 'TEST_COLLATION', 'TEST_NAME', 'TEST_MIRROR']:
+ conn.setdefault(setting, None)
+
+ def __getitem__(self, alias):
+ if hasattr(self._connections, alias):
+ return getattr(self._connections, alias)
+
+ self.ensure_defaults(alias)
+ db = self.databases[alias]
+ backend = load_backend(db['ENGINE'])
+ conn = backend.DatabaseWrapper(db, alias)
+ setattr(self._connections, alias, conn)
+ return conn
+
+ def __setitem__(self, key, value):
+ setattr(self._connections, key, value)
+
+ def __delitem__(self, key):
+ delattr(self._connections, key)
+
+ def __iter__(self):
+ return iter(self.databases)
+
+ def all(self):
+ return [self[alias] for alias in self]
+
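+# A minimal usage sketch: connections are created lazily, one per alias
+# and per thread.
+#
+#     handler = ConnectionHandler()        # falls back to settings.DATABASES
+#     conn = handler[DEFAULT_DB_ALIAS]     # DatabaseWrapper built on first use
+#     all_conns = handler.all()            # one wrapper per configured alias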
+
+class ConnectionRouter(object):
+ def __init__(self, routers=None):
+ """
+        If routers is not specified, it will default to settings.DATABASE_ROUTERS.
+ """
+ self._routers = routers
+
+ @cached_property
+ def routers(self):
+ if self._routers is None:
+ self._routers = settings.DATABASE_ROUTERS
+ routers = []
+ for r in self._routers:
+ if isinstance(r, six.string_types):
+ router = import_by_path(r)()
+ else:
+ router = r
+ routers.append(router)
+ return routers
+
+ def _router_func(action):
+ def _route_db(self, model, **hints):
+ chosen_db = None
+ for router in self.routers:
+ try:
+ method = getattr(router, action)
+ except AttributeError:
+ # If the router doesn't have a method, skip to the next one.
+ pass
+ else:
+ chosen_db = method(model, **hints)
+ if chosen_db:
+ return chosen_db
+ try:
+ return hints['instance']._state.db or DEFAULT_DB_ALIAS
+ except KeyError:
+ return DEFAULT_DB_ALIAS
+ return _route_db
+
+ db_for_read = _router_func('db_for_read')
+ db_for_write = _router_func('db_for_write')
+
+ def allow_relation(self, obj1, obj2, **hints):
+ for router in self.routers:
+ try:
+ method = router.allow_relation
+ except AttributeError:
+ # If the router doesn't have a method, skip to the next one.
+ pass
+ else:
+ allow = method(obj1, obj2, **hints)
+ if allow is not None:
+ return allow
+ return obj1._state.db == obj2._state.db
+
+ def allow_syncdb(self, db, model):
+ for router in self.routers:
+ try:
+ method = router.allow_syncdb
+ except AttributeError:
+ # If the router doesn't have a method, skip to the next one.
+ pass
+ else:
+ allow = method(db, model)
+ if allow is not None:
+ return allow
+ return True
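+
+# A minimal usage sketch ('path.to.MyRouter', SomeModel and obj are
+# hypothetical): routers are consulted in order, the first non-None answer
+# wins, and the fallback is the instance's current database or
+# DEFAULT_DB_ALIAS.
+#
+#     router = ConnectionRouter(['path.to.MyRouter'])
+#     alias = router.db_for_read(SomeModel, instance=obj)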