Diffstat (limited to 'lib/python2.7/site-packages/django/db/models/sql')
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/__init__.py         9
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/aggregates.py     125
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/compiler.py      1128
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/constants.py       41
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/datastructures.py   62
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/expressions.py     117
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/query.py          1922
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/subqueries.py      297
-rw-r--r--  lib/python2.7/site-packages/django/db/models/sql/where.py           419
9 files changed, 4120 insertions, 0 deletions
diff --git a/lib/python2.7/site-packages/django/db/models/sql/__init__.py b/lib/python2.7/site-packages/django/db/models/sql/__init__.py
new file mode 100644
index 0000000..df5b74e
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/__init__.py
@@ -0,0 +1,9 @@
+from __future__ import absolute_import
+
+from django.db.models.sql.datastructures import EmptyResultSet
+from django.db.models.sql.subqueries import *
+from django.db.models.sql.query import *
+from django.db.models.sql.where import AND, OR
+
+
+__all__ = ['Query', 'AND', 'OR', 'EmptyResultSet']
diff --git a/lib/python2.7/site-packages/django/db/models/sql/aggregates.py b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py
new file mode 100644
index 0000000..2bd2b2f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py
@@ -0,0 +1,125 @@
+"""
+Classes to represent the default SQL aggregate functions
+"""
+import copy
+
+from django.db.models.fields import IntegerField, FloatField
+
+# Fake fields used to identify aggregate types in data-conversion operations.
+ordinal_aggregate_field = IntegerField()
+computed_aggregate_field = FloatField()
+
+class Aggregate(object):
+ """
+ Default SQL Aggregate.
+ """
+ is_ordinal = False
+ is_computed = False
+ sql_template = '%(function)s(%(field)s)'
+
+ def __init__(self, col, source=None, is_summary=False, **extra):
+ """Instantiate an SQL aggregate
+
+ * col is a column reference describing the subject field
+ of the aggregate. It can be an alias, or a tuple describing
+ a table and column name.
+ * source is the underlying field or aggregate definition for
+ the column reference. If the aggregate is not an ordinal or
+ computed type, this reference is used to determine the coerced
+ output type of the aggregate.
+ * extra is a dictionary of additional data to provide for the
+ aggregate definition
+
+ Also utilizes the class variables:
+ * sql_function, the name of the SQL function that implements the
+ aggregate.
+ * sql_template, a template string that is used to render the
+ aggregate into SQL.
+ * is_ordinal, a boolean indicating if the output of this aggregate
+ is an integer (e.g., a count)
+ * is_computed, a boolean indicating if the output of this aggregate
+ is a computed float (e.g., an average), regardless of the input
+ type.
+
+ """
+ self.col = col
+ self.source = source
+ self.is_summary = is_summary
+ self.extra = extra
+
+ # Follow the chain of aggregate sources back until you find an
+ # actual field, or an aggregate that forces a particular output
+ # type. The type of this field will be used to coerce values
+ # retrieved from the database.
+ tmp = self
+
+ while tmp and isinstance(tmp, Aggregate):
+ if getattr(tmp, 'is_ordinal', False):
+ tmp = ordinal_aggregate_field
+ elif getattr(tmp, 'is_computed', False):
+ tmp = computed_aggregate_field
+ else:
+ tmp = tmp.source
+
+ self.field = tmp
+
+ def relabeled_clone(self, change_map):
+ clone = copy.copy(self)
+ if isinstance(self.col, (list, tuple)):
+ clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
+ return clone
+
+ def as_sql(self, qn, connection):
+ "Return the aggregate, rendered as SQL with parameters."
+ params = []
+
+ if hasattr(self.col, 'as_sql'):
+ field_name, params = self.col.as_sql(qn, connection)
+ elif isinstance(self.col, (list, tuple)):
+ field_name = '.'.join([qn(c) for c in self.col])
+ else:
+ field_name = self.col
+
+ substitutions = {
+ 'function': self.sql_function,
+ 'field': field_name
+ }
+ substitutions.update(self.extra)
+
+ return self.sql_template % substitutions, params
+
+
+class Avg(Aggregate):
+ is_computed = True
+ sql_function = 'AVG'
+
+class Count(Aggregate):
+ is_ordinal = True
+ sql_function = 'COUNT'
+ sql_template = '%(function)s(%(distinct)s%(field)s)'
+
+ def __init__(self, col, distinct=False, **extra):
+ super(Count, self).__init__(col, distinct='DISTINCT ' if distinct else '', **extra)
+
+class Max(Aggregate):
+ sql_function = 'MAX'
+
+class Min(Aggregate):
+ sql_function = 'MIN'
+
+class StdDev(Aggregate):
+ is_computed = True
+
+ def __init__(self, col, sample=False, **extra):
+ super(StdDev, self).__init__(col, **extra)
+ self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
+
+class Sum(Aggregate):
+ sql_function = 'SUM'
+
+class Variance(Aggregate):
+ is_computed = True
+
+ def __init__(self, col, sample=False, **extra):
+ super(Variance, self).__init__(col, **extra)
+ self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP'
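
The rendering above is plain template substitution: as_sql() builds the
'field' name from the column reference, merges in any extras (such as the
'DISTINCT ' prefix Count precomputes in its __init__), and interpolates them
into sql_template. A minimal sketch of that substitution for
Count(('books', 'id'), distinct=True); the quoted column is a hypothetical
stand-in for what connection.ops.quote_name would produce:

    sql_template = '%(function)s(%(distinct)s%(field)s)'
    substitutions = {
        'function': 'COUNT',        # Count.sql_function
        'distinct': 'DISTINCT ',    # stored in extra by Count.__init__
        'field': '"books"."id"',    # '.'.join(qn(c) for c in self.col)
    }
    print(sql_template % substitutions)  # COUNT(DISTINCT "books"."id")
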
diff --git a/lib/python2.7/site-packages/django/db/models/sql/compiler.py b/lib/python2.7/site-packages/django/db/models/sql/compiler.py
new file mode 100644
index 0000000..ea7f9f4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/compiler.py
@@ -0,0 +1,1128 @@
+import datetime
+
+from django.conf import settings
+from django.core.exceptions import FieldError
+from django.db.backends.util import truncate_name
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.query_utils import select_related_descend, QueryWrapper
+from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
+ GET_ITERATOR_CHUNK_SIZE, SelectInfo)
+from django.db.models.sql.datastructures import EmptyResultSet
+from django.db.models.sql.expressions import SQLEvaluator
+from django.db.models.sql.query import get_order_dir, Query
+from django.db.transaction import TransactionManagementError
+from django.db.utils import DatabaseError
+from django.utils import six
+from django.utils.six.moves import zip
+from django.utils import timezone
+
+
+class SQLCompiler(object):
+ def __init__(self, query, connection, using):
+ self.query = query
+ self.connection = connection
+ self.using = using
+ self.quote_cache = {}
+ # When ordering a queryset with distinct on a column not part of the
+ # select set, the ordering column needs to be added to the select
+ # clause. This information is needed both in SQL construction and
+ # masking away the ordering selects from the returned row.
+ self.ordering_aliases = []
+ self.ordering_params = []
+
+ def pre_sql_setup(self):
+ """
+ Does any necessary class setup immediately prior to producing SQL. This
+ is for things that can't necessarily be done in __init__ because we
+ might not have all the pieces in place at that time.
+ # TODO: after the query has been executed, the altered state should be
+ # cleaned. We are not using a clone() of the query here.
+ """
+ if not self.query.tables:
+ self.query.join((None, self.query.get_meta().db_table, None))
+ if (not self.query.select and self.query.default_cols and not
+ self.query.included_inherited_models):
+ self.query.setup_inherited_models()
+ if self.query.select_related and not self.query.related_select_cols:
+ self.fill_related_selections()
+
+ def quote_name_unless_alias(self, name):
+ """
+ A wrapper around connection.ops.quote_name that doesn't quote aliases
+ for table names. This avoids problems with some SQL dialects that treat
+ quoted strings specially (e.g. PostgreSQL).
+ """
+ if name in self.quote_cache:
+ return self.quote_cache[name]
+ if ((name in self.query.alias_map and name not in self.query.table_map) or
+ name in self.query.extra_select):
+ self.quote_cache[name] = name
+ return name
+ r = self.connection.ops.quote_name(name)
+ self.quote_cache[name] = r
+ return r
+
+ def as_sql(self, with_limits=True, with_col_aliases=False):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+
+ If 'with_limits' is False, any limit/offset information is not included
+ in the query.
+ """
+ if with_limits and self.query.low_mark == self.query.high_mark:
+ return '', ()
+
+ self.pre_sql_setup()
+ # After executing the query, we must get rid of any joins the query
+ # setup created. So, take note of alias counts before running the
+ # query. However, we do not want to undo anything done in
+ # pre_sql_setup(), as pre_sql_setup() modifies query state in a way
+ # that forbids another run of it.
+ self.refcounts_before = self.query.alias_refcount.copy()
+ out_cols, s_params = self.get_columns(with_col_aliases)
+ ordering, o_params, ordering_group_by = self.get_ordering()
+
+ distinct_fields = self.get_distinct()
+
+ # This must come after 'select', 'ordering' and 'distinct' -- see
+ # docstring of get_from_clause() for details.
+ from_, f_params = self.get_from_clause()
+
+ qn = self.quote_name_unless_alias
+
+ where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
+ having_group_by = self.query.having.get_cols()
+ params = []
+ for val in six.itervalues(self.query.extra_select):
+ params.extend(val[1])
+
+ result = ['SELECT']
+
+ if self.query.distinct:
+ result.append(self.connection.ops.distinct_sql(distinct_fields))
+ params.extend(o_params)
+ result.append(', '.join(out_cols + self.ordering_aliases))
+ params.extend(s_params)
+ params.extend(self.ordering_params)
+
+ result.append('FROM')
+ result.extend(from_)
+ params.extend(f_params)
+
+ if where:
+ result.append('WHERE %s' % where)
+ params.extend(w_params)
+
+ grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
+ if grouping:
+ if distinct_fields:
+ raise NotImplementedError(
+ "annotate() + distinct(fields) not implemented.")
+ if not ordering:
+ ordering = self.connection.ops.force_no_ordering()
+ result.append('GROUP BY %s' % ', '.join(grouping))
+ params.extend(gb_params)
+
+ if having:
+ result.append('HAVING %s' % having)
+ params.extend(h_params)
+
+ if ordering:
+ result.append('ORDER BY %s' % ', '.join(ordering))
+
+ if with_limits:
+ if self.query.high_mark is not None:
+ result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
+ if self.query.low_mark:
+ if self.query.high_mark is None:
+ val = self.connection.ops.no_limit_value()
+ if val:
+ result.append('LIMIT %d' % val)
+ result.append('OFFSET %d' % self.query.low_mark)
+
+ if self.query.select_for_update and self.connection.features.has_select_for_update:
+ if self.connection.get_autocommit():
+ raise TransactionManagementError("select_for_update cannot be used outside of a transaction.")
+
+ # If we've been asked for a NOWAIT query but the backend does not support it,
+ # raise a DatabaseError; otherwise we could get an unexpected deadlock.
+ nowait = self.query.select_for_update_nowait
+ if nowait and not self.connection.features.has_select_for_update_nowait:
+ raise DatabaseError('NOWAIT is not supported on this database backend.')
+ result.append(self.connection.ops.for_update_sql(nowait=nowait))
+
+ # Finally do cleanup - get rid of the joins we created above.
+ self.query.reset_refcounts(self.refcounts_before)
+
+ return ' '.join(result), tuple(params)
+
+ def as_nested_sql(self):
+ """
+ Perform the same functionality as the as_sql() method, returning an
+ SQL string and parameters. However, the alias prefixes are bumped
+ beforehand (in a copy -- the current query isn't changed), and any
+ ordering is removed if the query is unsliced.
+
+ Used when nesting this query inside another.
+ """
+ obj = self.query.clone()
+ if obj.low_mark == 0 and obj.high_mark is None:
+ # If there is no slicing in use, then we can safely drop all ordering
+ obj.clear_ordering(True)
+ obj.bump_prefix()
+ return obj.get_compiler(connection=self.connection).as_sql()
+
+ def get_columns(self, with_aliases=False):
+ """
+ Returns the list of columns to use in the select statement, as well as
+ a list of any extra parameters that need to be included. If no columns
+ have been specified, returns all columns relating to fields in the
+ model.
+
+ If 'with_aliases' is true, any column names that are duplicated
+ (without the table names) are given unique aliases. This is needed in
+ some cases to avoid ambiguity with nested queries.
+ """
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
+ params = []
+ aliases = set(self.query.extra_select.keys())
+ if with_aliases:
+ col_aliases = aliases.copy()
+ else:
+ col_aliases = set()
+ if self.query.select:
+ only_load = self.deferred_to_columns()
+ for col, _ in self.query.select:
+ if isinstance(col, (list, tuple)):
+ alias, column = col
+ table = self.query.alias_map[alias].table_name
+ if table in only_load and column not in only_load[table]:
+ continue
+ r = '%s.%s' % (qn(alias), qn(column))
+ if with_aliases:
+ if col[1] in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s AS %s' % (r, c_alias))
+ aliases.add(c_alias)
+ col_aliases.add(c_alias)
+ else:
+ result.append('%s AS %s' % (r, qn2(col[1])))
+ aliases.add(r)
+ col_aliases.add(col[1])
+ else:
+ result.append(r)
+ aliases.add(r)
+ col_aliases.add(col[1])
+ else:
+ col_sql, col_params = col.as_sql(qn, self.connection)
+ result.append(col_sql)
+ params.extend(col_params)
+
+ if hasattr(col, 'alias'):
+ aliases.add(col.alias)
+ col_aliases.add(col.alias)
+
+ elif self.query.default_cols:
+ cols, new_aliases = self.get_default_columns(with_aliases,
+ col_aliases)
+ result.extend(cols)
+ aliases.update(new_aliases)
+
+ max_name_length = self.connection.ops.max_name_length()
+ for alias, aggregate in self.query.aggregate_select.items():
+ agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
+ if alias is None:
+ result.append(agg_sql)
+ else:
+ result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
+ params.extend(agg_params)
+
+ for (table, col), _ in self.query.related_select_cols:
+ r = '%s.%s' % (qn(table), qn(col))
+ if with_aliases and col in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s AS %s' % (r, c_alias))
+ aliases.add(c_alias)
+ col_aliases.add(c_alias)
+ else:
+ result.append(r)
+ aliases.add(r)
+ col_aliases.add(col)
+
+ self._select_aliases = aliases
+ return result, params
+
+ def get_default_columns(self, with_aliases=False, col_aliases=None,
+ start_alias=None, opts=None, as_pairs=False, from_parent=None):
+ """
+ Computes the default columns for selecting every field in the base
+ model. Will sometimes be called to pull in related models (e.g. via
+ select_related), in which case "opts" and "start_alias" will be given
+ to provide a starting point for the traversal.
+
+ Returns a list of strings, quoted appropriately for use in SQL
+ directly, as well as a set of aliases used in the select statement (if
+ 'as_pairs' is True, returns a list of (alias, col_name) pairs instead
+ of strings as the first component and None as the second component).
+ """
+ result = []
+ if opts is None:
+ opts = self.query.get_meta()
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ aliases = set()
+ only_load = self.deferred_to_columns()
+ if not start_alias:
+ start_alias = self.query.get_initial_alias()
+ # The 'seen_models' is used to optimize checking the needed parent
+ # alias for a given field. This also includes None -> start_alias to
+ # be used by local fields.
+ seen_models = {None: start_alias}
+
+ for field, model in opts.get_concrete_fields_with_model():
+ if from_parent and model is not None and issubclass(from_parent, model):
+ # Avoid loading data for already loaded parents.
+ continue
+ alias = self.query.join_parent_model(opts, model, start_alias,
+ seen_models)
+ table = self.query.alias_map[alias].table_name
+ if table in only_load and field.column not in only_load[table]:
+ continue
+ if as_pairs:
+ result.append((alias, field))
+ aliases.add(alias)
+ continue
+ if with_aliases and field.column in col_aliases:
+ c_alias = 'Col%d' % len(col_aliases)
+ result.append('%s.%s AS %s' % (qn(alias),
+ qn2(field.column), c_alias))
+ col_aliases.add(c_alias)
+ aliases.add(c_alias)
+ else:
+ r = '%s.%s' % (qn(alias), qn2(field.column))
+ result.append(r)
+ aliases.add(r)
+ if with_aliases:
+ col_aliases.add(field.column)
+ return result, aliases
+
+ def get_distinct(self):
+ """
+ Returns a quoted list of fields to use in DISTINCT ON part of the query.
+
+ Note that this method can alter the tables in the query, and thus it
+ must be called before get_from_clause().
+ """
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ result = []
+ opts = self.query.get_meta()
+
+ for name in self.query.distinct_fields:
+ parts = name.split(LOOKUP_SEP)
+ field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
+ cols, alias = self._final_join_removal(cols, alias)
+ for col in cols:
+ result.append("%s.%s" % (qn(alias), qn2(col)))
+ return result
+
+ def get_ordering(self):
+ """
+ Returns a tuple containing a list representing the SQL elements in the
+ "order by" clause, and the list of SQL elements that need to be added
+ to the GROUP BY clause as a result of the ordering.
+
+ Also sets the ordering_aliases attribute on this instance to a list of
+ extra aliases needed in the select.
+
+ Determining the ordering SQL can change the tables we need to include,
+ so this should be run *before* get_from_clause().
+ """
+ if self.query.extra_order_by:
+ ordering = self.query.extra_order_by
+ elif not self.query.default_ordering:
+ ordering = self.query.order_by
+ else:
+ ordering = (self.query.order_by
+ or self.query.get_meta().ordering
+ or [])
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ distinct = self.query.distinct
+ select_aliases = self._select_aliases
+ result = []
+ group_by = []
+ ordering_aliases = []
+ if self.query.standard_ordering:
+ asc, desc = ORDER_DIR['ASC']
+ else:
+ asc, desc = ORDER_DIR['DESC']
+
+ # It's possible, due to model inheritance, that normal usage might try
+ # to include the same field more than once in the ordering. We track
+ # the table/column pairs we use and discard any after the first use.
+ processed_pairs = set()
+
+ params = []
+ ordering_params = []
+ for pos, field in enumerate(ordering):
+ if field == '?':
+ result.append(self.connection.ops.random_function_sql())
+ continue
+ if isinstance(field, int):
+ if field < 0:
+ order = desc
+ field = -field
+ else:
+ order = asc
+ result.append('%s %s' % (field, order))
+ group_by.append((str(field), []))
+ continue
+ col, order = get_order_dir(field, asc)
+ if col in self.query.aggregate_select:
+ result.append('%s %s' % (qn(col), order))
+ continue
+ if '.' in field:
+ # This came in through an extra(order_by=...) addition. Pass it
+ # on verbatim.
+ table, col = col.split('.', 1)
+ if (table, col) not in processed_pairs:
+ elt = '%s.%s' % (qn(table), col)
+ processed_pairs.add((table, col))
+ if not distinct or elt in select_aliases:
+ result.append('%s %s' % (elt, order))
+ group_by.append((elt, []))
+ elif get_order_dir(field)[0] not in self.query.extra:
+ # 'col' is of the form 'field' or 'field1__field2' or
+ # '-field1__field2__field', etc.
+ for table, cols, order in self.find_ordering_name(field,
+ self.query.get_meta(), default_order=asc):
+ for col in cols:
+ if (table, col) not in processed_pairs:
+ elt = '%s.%s' % (qn(table), qn2(col))
+ processed_pairs.add((table, col))
+ if distinct and elt not in select_aliases:
+ ordering_aliases.append(elt)
+ result.append('%s %s' % (elt, order))
+ group_by.append((elt, []))
+ else:
+ elt = qn2(col)
+ if col not in self.query.extra_select:
+ sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
+ ordering_aliases.append(sql)
+ ordering_params.extend(self.query.extra[col][1])
+ else:
+ if distinct and col not in select_aliases:
+ ordering_aliases.append(elt)
+ ordering_params.extend(params)
+ result.append('%s %s' % (elt, order))
+ group_by.append(self.query.extra[col])
+ self.ordering_aliases = ordering_aliases
+ self.ordering_params = ordering_params
+ return result, params, group_by
+
+ def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
+ already_seen=None):
+ """
+ Returns the table alias (the name might be ambiguous, the alias will
+ not be) and column name for ordering by the given 'name' parameter.
+ The 'name' is of the form 'field1__field2__...__fieldN'.
+ """
+ name, order = get_order_dir(name, default_order)
+ pieces = name.split(LOOKUP_SEP)
+ field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
+
+ # If we get to this point and the field is a relation to another model,
+ # append the default ordering for that model.
+ if field.rel and len(joins) > 1 and opts.ordering:
+ # Firstly, avoid infinite loops.
+ if not already_seen:
+ already_seen = set()
+ join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
+ if join_tuple in already_seen:
+ raise FieldError('Infinite loop caused by ordering.')
+ already_seen.add(join_tuple)
+
+ results = []
+ for item in opts.ordering:
+ results.extend(self.find_ordering_name(item, opts, alias,
+ order, already_seen))
+ return results
+ cols, alias = self._final_join_removal(cols, alias)
+ return [(alias, cols, order)]
+
+ def _setup_joins(self, pieces, opts, alias):
+ """
+ A helper method for get_ordering and get_distinct. This method will
+ call query.setup_joins, handle refcounts and then promote the joins.
+
+ Note that get_ordering and get_distinct must produce the same target
+ columns for the same input, as the prefixes of get_ordering and
+ get_distinct must match. Executing SQL where this is not true is an error.
+ """
+ if not alias:
+ alias = self.query.get_initial_alias()
+ field, targets, opts, joins, _ = self.query.setup_joins(
+ pieces, opts, alias)
+ # We will later on need to promote those joins that were added to the
+ # query afresh above.
+ joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
+ alias = joins[-1]
+ cols = [target.column for target in targets]
+ if not field.rel:
+ # To avoid inadvertent trimming of a necessary alias, use the
+ # refcount to show that we are referencing a non-relation field on
+ # the model.
+ self.query.ref_alias(alias)
+
+ # Must use left outer joins for nullable fields and their relations.
+ # Ordering or distinct must not affect the returned set, and INNER
+ # JOINS for nullable fields could do this.
+ self.query.promote_joins(joins_to_promote)
+ return field, cols, alias, joins, opts
+
+ def _final_join_removal(self, cols, alias):
+ """
+ A helper method for get_distinct and get_ordering. This method will
+ trim extra, unneeded joins from the tail of the join chain.
+
+ This is very similar to what is done in trim_joins, but we will
+ trim LEFT JOINS here. It would be a good idea to consolidate this
+ method and query.trim_joins().
+ """
+ if alias:
+ while 1:
+ join = self.query.alias_map[alias]
+ lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
+ if set(cols) != set(rhs_cols):
+ break
+
+ cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
+ self.query.unref_alias(alias)
+ alias = join.lhs_alias
+ return cols, alias
+
+ def get_from_clause(self):
+ """
+ Returns a list of strings that are joined together to go after the
+ "FROM" part of the query, as well as a list any extra parameters that
+ need to be included. Sub-classes, can override this to create a
+ from-clause via a "select".
+
+ This should only be called after any SQL construction methods that
+ might change the tables we need. This means the select columns,
+ ordering and distinct must be done first.
+ """
+ result = []
+ qn = self.quote_name_unless_alias
+ qn2 = self.connection.ops.quote_name
+ first = True
+ from_params = []
+ for alias in self.query.tables:
+ if not self.query.alias_refcount[alias]:
+ continue
+ try:
+ name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
+ except KeyError:
+ # Extra tables can end up in self.tables, but not in the
+ # alias_map if they aren't in a join. That's OK. We skip them.
+ continue
+ alias_str = '' if alias == name else (' %s' % alias)
+ if join_type and not first:
+ extra_cond = join_field.get_extra_restriction(
+ self.query.where_class, alias, lhs)
+ if extra_cond:
+ extra_sql, extra_params = extra_cond.as_sql(
+ qn, self.connection)
+ extra_sql = 'AND (%s)' % extra_sql
+ from_params.extend(extra_params)
+ else:
+ extra_sql = ""
+ result.append('%s %s%s ON ('
+ % (join_type, qn(name), alias_str))
+ for index, (lhs_col, rhs_col) in enumerate(join_cols):
+ if index != 0:
+ result.append(' AND ')
+ result.append('%s.%s = %s.%s' %
+ (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
+ result.append('%s)' % extra_sql)
+ else:
+ connector = '' if first else ', '
+ result.append('%s%s%s' % (connector, qn(name), alias_str))
+ first = False
+ for t in self.query.extra_tables:
+ alias, unused = self.query.table_alias(t)
+ # Only add the alias if it's not already present (the table_alias()
+ # call increments the refcount, so an alias refcount of one means
+ # this is the only reference).
+ if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
+ connector = '' if first else ', '
+ result.append('%s%s' % (connector, qn(alias)))
+ first = False
+ return result, from_params
+
+ def get_grouping(self, having_group_by, ordering_group_by):
+ """
+ Returns a tuple representing the SQL elements in the "group by" clause.
+ """
+ qn = self.quote_name_unless_alias
+ result, params = [], []
+ if self.query.group_by is not None:
+ select_cols = self.query.select + self.query.related_select_cols
+ # Just the column, not the fields.
+ select_cols = [s[0] for s in select_cols]
+ if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
+ and self.connection.features.allows_group_by_pk):
+ self.query.group_by = [
+ (self.query.get_meta().db_table, self.query.get_meta().pk.column)
+ ]
+ select_cols = []
+ seen = set()
+ cols = self.query.group_by + having_group_by + select_cols
+ for col in cols:
+ col_params = ()
+ if isinstance(col, (list, tuple)):
+ sql = '%s.%s' % (qn(col[0]), qn(col[1]))
+ elif hasattr(col, 'as_sql'):
+ sql, col_params = col.as_sql(qn, self.connection)
+ else:
+ sql = '(%s)' % str(col)
+ if sql not in seen:
+ result.append(sql)
+ params.extend(col_params)
+ seen.add(sql)
+
+ # We still need to add everything in the ordering (unless the backend
+ # can group by just the PK).
+ if ordering_group_by and not self.connection.features.allows_group_by_pk:
+ for order, order_params in ordering_group_by:
+ # Even if we have seen the same SQL string, it might have
+ # different params, so we add the same SQL in the "has params" case.
+ if order not in seen or order_params:
+ result.append(order)
+ params.extend(order_params)
+ seen.add(order)
+
+ # Unconditionally add the extra_select items.
+ for extra_select, extra_params in self.query.extra_select.values():
+ sql = '(%s)' % str(extra_select)
+ result.append(sql)
+ params.extend(extra_params)
+
+ return result, params
+
+ def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
+ requested=None, restricted=None, nullable=None):
+ """
+ Fill in the information needed for a select_related query. The current
+ depth is measured as the number of connections away from the root model
+ (for example, cur_depth=1 means we are looking at models with direct
+ connections to the root model).
+ """
+ if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
+ # We've recursed far enough; bail out.
+ return
+
+ if not opts:
+ opts = self.query.get_meta()
+ root_alias = self.query.get_initial_alias()
+ self.query.related_select_cols = []
+ only_load = self.query.get_loaded_field_names()
+
+ # Setup for the case when only particular related fields should be
+ # included in the related selection.
+ if requested is None:
+ if isinstance(self.query.select_related, dict):
+ requested = self.query.select_related
+ restricted = True
+ else:
+ restricted = False
+
+ for f, model in opts.get_fields_with_model():
+ # get_fields_with_model() returns None as the model for fields that
+ # live on the local model. For those fields, use f.model instead -
+ # that is the field's local model.
+ field_model = model or f.model
+ if not select_related_descend(f, restricted, requested,
+ only_load.get(field_model)):
+ continue
+ promote = nullable or f.null
+ _, _, _, joins, _ = self.query.setup_joins(
+ [f.name], opts, root_alias, outer_if_first=promote)
+ alias = joins[-1]
+ columns, _ = self.get_default_columns(start_alias=alias,
+ opts=f.rel.to._meta, as_pairs=True)
+ self.query.related_select_cols.extend(
+ SelectInfo((col[0], col[1].column), col[1]) for col in columns)
+ if restricted:
+ next = requested.get(f.name, {})
+ else:
+ next = False
+ new_nullable = f.null or promote
+ self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
+ next, restricted, new_nullable)
+
+ if restricted:
+ related_fields = [
+ (o.field, o.model)
+ for o in opts.get_all_related_objects()
+ if o.field.unique
+ ]
+ for f, model in related_fields:
+ if not select_related_descend(f, restricted, requested,
+ only_load.get(model), reverse=True):
+ continue
+
+ _, _, _, joins, _ = self.query.setup_joins(
+ [f.related_query_name()], opts, root_alias, outer_if_first=True)
+ alias = joins[-1]
+ from_parent = (opts.model if issubclass(model, opts.model)
+ else None)
+ columns, _ = self.get_default_columns(start_alias=alias,
+ opts=model._meta, as_pairs=True, from_parent=from_parent)
+ self.query.related_select_cols.extend(
+ SelectInfo((col[0], col[1].column), col[1]) for col in columns)
+ next = requested.get(f.related_query_name(), {})
+ # Use True here because we are looking at the _reverse_ side of
+ # the relation, which is always nullable.
+ new_nullable = True
+ self.fill_related_selections(model._meta, alias, cur_depth + 1,
+ next, restricted, new_nullable)
+
+ def deferred_to_columns(self):
+ """
+ Converts the self.deferred_loading data structure to mapping of table
+ names to sets of column names which are to be loaded. Returns the
+ dictionary.
+ """
+ columns = {}
+ self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
+ return columns
+
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ fields = None
+ has_aggregate_select = bool(self.query.aggregate_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ if has_aggregate_select:
+ loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
+ aggregate_start = len(self.query.extra_select) + len(loaded_fields)
+ aggregate_end = aggregate_start + len(self.query.aggregate_select)
+ if resolve_columns:
+ if fields is None:
+ # We only set this up here because
+ # related_select_cols isn't populated until
+ # execute_sql() has been called.
+
+ # We also include types of fields of related models that
+ # will be included via select_related() for the benefit
+ # of MySQL/MySQLdb when boolean fields are involved
+ # (#15040).
+
+ # This code duplicates the logic for the order of fields
+ # found in get_columns(). It would be nice to clean this up.
+ if self.query.select:
+ fields = [f.field for f in self.query.select]
+ elif self.query.default_cols:
+ fields = self.query.get_meta().concrete_fields
+ else:
+ fields = []
+ fields = fields + [f.field for f in self.query.related_select_cols]
+
+ # If the field was deferred, exclude it from being passed
+ # into `resolve_columns` because it wasn't selected.
+ only_load = self.deferred_to_columns()
+ if only_load:
+ fields = [f for f in fields if f.model._meta.db_table not in only_load or
+ f.column in only_load[f.model._meta.db_table]]
+ if has_aggregate_select:
+ # Pad fields with None placeholders for the aggregate columns.
+ fields = fields[:aggregate_start] + [
+ None for x in range(0, aggregate_end - aggregate_start)
+ ] + fields[aggregate_start:]
+ row = self.resolve_columns(row, fields)
+
+ if has_aggregate_select:
+ row = tuple(row[:aggregate_start]) + tuple([
+ self.query.resolve_aggregate(value, aggregate, self.connection)
+ for (alias, aggregate), value
+ in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
+ ]) + tuple(row[aggregate_end:])
+
+ yield row
+
+ def execute_sql(self, result_type=MULTI):
+ """
+ Run the query against the database and returns the result(s). The
+ return value is a single data item if result_type is SINGLE, or an
+ iterator over the results if the result_type is MULTI.
+
+ result_type is either MULTI (use fetchmany() to retrieve all rows),
+ SINGLE (only retrieve a single row), or None. In this last case, the
+ cursor is returned if any query is executed, since it's used by
+ subclasses such as InsertQuery. It's possible, however, that no query
+ is needed, as the filters describe an empty set. In that case, None is
+ returned, to avoid any unnecessary database interaction.
+ """
+ try:
+ sql, params = self.as_sql()
+ if not sql:
+ raise EmptyResultSet
+ except EmptyResultSet:
+ if result_type == MULTI:
+ return iter([])
+ else:
+ return
+
+ cursor = self.connection.cursor()
+ cursor.execute(sql, params)
+
+ if not result_type:
+ return cursor
+ if result_type == SINGLE:
+ if self.ordering_aliases:
+ return cursor.fetchone()[:-len(self.ordering_aliases)]
+ return cursor.fetchone()
+
+ # The MULTI case.
+ if self.ordering_aliases:
+ result = order_modified_iter(cursor, len(self.ordering_aliases),
+ self.connection.features.empty_fetchmany_value)
+ else:
+ result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
+ self.connection.features.empty_fetchmany_value)
+ if not self.connection.features.can_use_chunked_reads:
+ # If we are using non-chunked reads, we return the same data
+ # structure as normally, but ensure it is all read into memory
+ # before going any further.
+ return list(result)
+ return result
+
+ def as_subquery_condition(self, alias, columns, qn):
+ qn2 = self.connection.ops.quote_name
+ if len(columns) == 1:
+ sql, params = self.as_sql()
+ return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
+
+ for index, select_col in enumerate(self.query.select):
+ lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
+ rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
+ self.query.where.add(
+ QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
+
+ sql, params = self.as_sql()
+ return 'EXISTS (%s)' % sql, params
+
+
+class SQLInsertCompiler(SQLCompiler):
+
+ def __init__(self, *args, **kwargs):
+ self.return_id = False
+ super(SQLInsertCompiler, self).__init__(*args, **kwargs)
+
+ def placeholder(self, field, val):
+ if field is None:
+ # A field value of None means the value is raw.
+ return val
+ elif hasattr(field, 'get_placeholder'):
+ # Some fields (e.g. geo fields) need special munging before
+ # they can be inserted.
+ return field.get_placeholder(val, self.connection)
+ else:
+ # Return the common case for the placeholder
+ return '%s'
+
+ def as_sql(self):
+ # We don't need quote_name_unless_alias() here, since these are all
+ # going to be column names (so we can avoid the extra overhead).
+ qn = self.connection.ops.quote_name
+ opts = self.query.get_meta()
+ result = ['INSERT INTO %s' % qn(opts.db_table)]
+
+ has_fields = bool(self.query.fields)
+ fields = self.query.fields if has_fields else [opts.pk]
+ result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
+
+ if has_fields:
+ params = values = [
+ [
+ f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
+ for f in fields
+ ]
+ for obj in self.query.objs
+ ]
+ else:
+ values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
+ params = [[]]
+ fields = [None]
+ can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
+ not self.return_id and self.connection.features.has_bulk_insert)
+
+ if can_bulk:
+ placeholders = [["%s"] * len(fields)]
+ else:
+ placeholders = [
+ [self.placeholder(field, v) for field, v in zip(fields, val)]
+ for val in values
+ ]
+ # Oracle Spatial needs to remove some values due to #10888
+ params = self.connection.ops.modify_insert_params(placeholders, params)
+ if self.return_id and self.connection.features.can_return_id_from_insert:
+ params = params[0]
+ col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
+ result.append("VALUES (%s)" % ", ".join(placeholders[0]))
+ r_fmt, r_params = self.connection.ops.return_insert_id()
+ # Skip empty r_fmt to allow subclasses to customize behaviour for
+ # 3rd party backends. Refs #19096.
+ if r_fmt:
+ result.append(r_fmt % col)
+ params += r_params
+ return [(" ".join(result), tuple(params))]
+ if can_bulk:
+ result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
+ return [(" ".join(result), tuple([v for val in values for v in val]))]
+ else:
+ return [
+ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
+ for p, vals in zip(placeholders, params)
+ ]
+
+ def execute_sql(self, return_id=False):
+ assert not (return_id and len(self.query.objs) != 1)
+ self.return_id = return_id
+ cursor = self.connection.cursor()
+ for sql, params in self.as_sql():
+ cursor.execute(sql, params)
+ if not (return_id and cursor):
+ return
+ if self.connection.features.can_return_id_from_insert:
+ return self.connection.ops.fetch_returned_insert_id(cursor)
+ return self.connection.ops.last_insert_id(cursor,
+ self.query.get_meta().db_table, self.query.get_meta().pk.column)
+
+
+class SQLDeleteCompiler(SQLCompiler):
+ def as_sql(self):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ assert len(self.query.tables) == 1, \
+ "Can only delete from one table at a time."
+ qn = self.quote_name_unless_alias
+ result = ['DELETE FROM %s' % qn(self.query.tables[0])]
+ where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ if where:
+ result.append('WHERE %s' % where)
+ return ' '.join(result), tuple(params)
+
+class SQLUpdateCompiler(SQLCompiler):
+ def as_sql(self):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ self.pre_sql_setup()
+ if not self.query.values:
+ return '', ()
+ table = self.query.tables[0]
+ qn = self.quote_name_unless_alias
+ result = ['UPDATE %s' % qn(table)]
+ result.append('SET')
+ values, update_params = [], []
+ for field, model, val in self.query.values:
+ if hasattr(val, 'prepare_database_save'):
+ val = val.prepare_database_save(field)
+ else:
+ val = field.get_db_prep_save(val, connection=self.connection)
+
+ # Getting the placeholder for the field.
+ if hasattr(field, 'get_placeholder'):
+ placeholder = field.get_placeholder(val, self.connection)
+ else:
+ placeholder = '%s'
+
+ if hasattr(val, 'evaluate'):
+ val = SQLEvaluator(val, self.query, allow_joins=False)
+ name = field.column
+ if hasattr(val, 'as_sql'):
+ sql, params = val.as_sql(qn, self.connection)
+ values.append('%s = %s' % (qn(name), sql))
+ update_params.extend(params)
+ elif val is not None:
+ values.append('%s = %s' % (qn(name), placeholder))
+ update_params.append(val)
+ else:
+ values.append('%s = NULL' % qn(name))
+ if not values:
+ return '', ()
+ result.append(', '.join(values))
+ where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
+ if where:
+ result.append('WHERE %s' % where)
+ return ' '.join(result), tuple(update_params + params)
+
+ def execute_sql(self, result_type):
+ """
+ Execute the specified update. Returns the number of rows affected by
+ the primary update query. The "primary update query" is the first
+ non-empty query that is executed. Row counts for any subsequent,
+ related queries are not available.
+ """
+ cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
+ rows = cursor.rowcount if cursor else 0
+ is_empty = cursor is None
+ del cursor
+ for query in self.query.get_related_updates():
+ aux_rows = query.get_compiler(self.using).execute_sql(result_type)
+ if is_empty:
+ rows = aux_rows
+ is_empty = False
+ return rows
+
+ def pre_sql_setup(self):
+ """
+ If the update depends on results from other tables, we need to do some
+ munging of the "where" conditions to match the format required for
+ (portable) SQL updates. That is done here.
+
+ Further, if we are going to be running multiple updates, we pull out
+ the id values to update at this point so that they don't change as a
+ result of the progressive updates.
+ """
+ self.query.select_related = False
+ self.query.clear_ordering(True)
+ super(SQLUpdateCompiler, self).pre_sql_setup()
+ count = self.query.count_active_tables()
+ if not self.query.related_updates and count == 1:
+ return
+
+ # We need to use a sub-select in the where clause to filter on things
+ # from other tables.
+ query = self.query.clone(klass=Query)
+ query.bump_prefix()
+ query.extra = {}
+ query.select = []
+ query.add_fields([query.get_meta().pk.name])
+ # Recheck the count - it is possible that fiddling with the select
+ # fields above removes tables from the query. Refs #18304.
+ count = query.count_active_tables()
+ if not self.query.related_updates and count == 1:
+ return
+
+ must_pre_select = count > 1 and not self.connection.features.update_can_self_select
+
+ # Now we adjust the current query: reset the where clause and get rid
+ # of all the tables we don't need (since they're in the sub-select).
+ self.query.where = self.query.where_class()
+ if self.query.related_updates or must_pre_select:
+ # Either we're using the idents in multiple update queries (so
+ # don't want them to change), or the db backend doesn't support
+ # selecting from the updating table (e.g. MySQL).
+ idents = []
+ for rows in query.get_compiler(self.using).execute_sql(MULTI):
+ idents.extend([r[0] for r in rows])
+ self.query.add_filter(('pk__in', idents))
+ self.query.related_ids = idents
+ else:
+ # The fast path. Filters and updates in one query.
+ self.query.add_filter(('pk__in', query))
+ for alias in self.query.tables[1:]:
+ self.query.alias_refcount[alias] = 0
+
+class SQLAggregateCompiler(SQLCompiler):
+ def as_sql(self, qn=None):
+ """
+ Creates the SQL for this query. Returns the SQL string and list of
+ parameters.
+ """
+ if qn is None:
+ qn = self.quote_name_unless_alias
+
+ sql, params = [], []
+ for aggregate in self.query.aggregate_select.values():
+ agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
+ sql.append(agg_sql)
+ params.extend(agg_params)
+ sql = ', '.join(sql)
+ params = tuple(params)
+
+ sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
+ params = params + self.query.sub_params
+ return sql, params
+
+class SQLDateCompiler(SQLCompiler):
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ if resolve_columns:
+ from django.db.models.fields import DateField
+ fields = [DateField()]
+ else:
+ from django.db.backends.util import typecast_date
+ needs_string_cast = self.connection.features.needs_datetime_string_cast
+
+ offset = len(self.query.extra_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ date = row[offset]
+ if resolve_columns:
+ date = self.resolve_columns(row, fields)[offset]
+ elif needs_string_cast:
+ date = typecast_date(str(date))
+ if isinstance(date, datetime.datetime):
+ date = date.date()
+ yield date
+
+class SQLDateTimeCompiler(SQLCompiler):
+ def results_iter(self):
+ """
+ Returns an iterator over the results from executing this query.
+ """
+ resolve_columns = hasattr(self, 'resolve_columns')
+ if resolve_columns:
+ from django.db.models.fields import DateTimeField
+ fields = [DateTimeField()]
+ else:
+ from django.db.backends.util import typecast_timestamp
+ needs_string_cast = self.connection.features.needs_datetime_string_cast
+
+ offset = len(self.query.extra_select)
+ for rows in self.execute_sql(MULTI):
+ for row in rows:
+ datetime = row[offset]
+ if resolve_columns:
+ datetime = self.resolve_columns(row, fields)[offset]
+ elif needs_string_cast:
+ datetime = typecast_timestamp(str(datetime))
+ # Datetimes are artificially returned in UTC on databases that
+ # don't support time zone. Restore the zone used in the query.
+ if settings.USE_TZ:
+ if datetime is None:
+ raise ValueError("Database returned an invalid value "
+ "in QuerySet.datetimes(). Are time zone "
+ "definitions for your database and pytz installed?")
+ datetime = datetime.replace(tzinfo=None)
+ datetime = timezone.make_aware(datetime, self.query.tzinfo)
+ yield datetime
+
+def order_modified_iter(cursor, trim, sentinel):
+ """
+ Yields blocks of rows from a cursor. We use this iterator in the special
+ case when extra output columns have been added to support ordering
+ requirements. We must trim those extra columns before anything else can use
+ the results, since they're only needed to make the SQL valid.
+ """
+ for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
+ sentinel):
+ yield [r[:-trim] for r in rows]
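
None of these compilers is normally instantiated by hand: Query.get_compiler()
(defined in query.py below) looks up the compiler class named by the query's
compiler attribute and binds it to a connection. A hedged sketch of driving
SQLCompiler from a queryset; the Book model and the 'default' alias are
hypothetical:

    from myapp.models import Book  # hypothetical model

    qs = Book.objects.filter(title__icontains='django')
    compiler = qs.query.get_compiler(using='default')  # an SQLCompiler
    sql, params = compiler.as_sql()
    # str(qs.query) performs roughly this same substitution:
    print(sql % tuple(params))
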
diff --git a/lib/python2.7/site-packages/django/db/models/sql/constants.py b/lib/python2.7/site-packages/django/db/models/sql/constants.py
new file mode 100644
index 0000000..904f7b2
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/constants.py
@@ -0,0 +1,41 @@
+"""
+Constants specific to the SQL storage portion of the ORM.
+"""
+
+from collections import namedtuple
+import re
+
+# Valid query types (a set is used for speedy lookups). These are (currently)
+# considered SQL-specific; other storage systems may choose to use different
+# lookup types.
+QUERY_TERMS = set([
+ 'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
+ 'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
+ 'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
+ 'regex', 'iregex',
+])
+
+# Size of each "chunk" for get_iterator calls.
+# Larger values are slightly faster at the expense of more storage space.
+GET_ITERATOR_CHUNK_SIZE = 100
+
+# Namedtuples for sql.* internal use.
+
+# Join lists (indexes into the tuples that are values in the alias_map
+# dictionary in the Query class).
+JoinInfo = namedtuple('JoinInfo',
+ 'table_name rhs_alias join_type lhs_alias '
+ 'join_cols nullable join_field')
+
+# Pairs of (column clause to select, field for the clause); the field may be None.
+SelectInfo = namedtuple('SelectInfo', 'col field')
+
+# How many results to expect from a cursor.execute call
+MULTI = 'multi'
+SINGLE = 'single'
+
+ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
+ORDER_DIR = {
+ 'ASC': ('ASC', 'DESC'),
+ 'DESC': ('DESC', 'ASC'),
+}
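
ORDER_PATTERN is what order_by() arguments are checked against before they
can reach the SQL: '?' requests random ordering, and otherwise a term is an
optional '-' or '+' prefix followed by word characters and dots. A quick
sketch of the vetting:

    for term in ('?', '-pub_date', 'author.name', 'title); DROP TABLE x'):
        print(term, bool(ORDER_PATTERN.match(term)))
    # The first three match; the injection-style term is rejected.
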
diff --git a/lib/python2.7/site-packages/django/db/models/sql/datastructures.py b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py
new file mode 100644
index 0000000..daaabbe
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py
@@ -0,0 +1,62 @@
+"""
+Useful auxiliary data structures for query construction. Not useful outside
+the SQL domain.
+"""
+
+class EmptyResultSet(Exception):
+ pass
+
+class MultiJoin(Exception):
+ """
+ Used by join construction code to indicate the point at which a
+ multi-valued join was attempted (if the caller wants to treat that
+ exceptionally).
+ """
+ def __init__(self, names_pos, path_with_names):
+ self.level = names_pos
+ # The path travelled; this includes the path to the multijoin.
+ self.names_with_path = path_with_names
+
+class Empty(object):
+ pass
+
+class RawValue(object):
+ def __init__(self, value):
+ self.value = value
+
+class Date(object):
+ """
+ Add a date selection column.
+ """
+ def __init__(self, col, lookup_type):
+ self.col = col
+ self.lookup_type = lookup_type
+
+ def relabeled_clone(self, change_map):
+ return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
+
+ def as_sql(self, qn, connection):
+ if isinstance(self.col, (list, tuple)):
+ col = '%s.%s' % tuple([qn(c) for c in self.col])
+ else:
+ col = self.col
+ return connection.ops.date_trunc_sql(self.lookup_type, col), []
+
+class DateTime(object):
+ """
+ Add a datetime selection column.
+ """
+ def __init__(self, col, lookup_type, tzname):
+ self.col = col
+ self.lookup_type = lookup_type
+ self.tzname = tzname
+
+ def relabeled_clone(self, change_map):
+ return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1]))
+
+ def as_sql(self, qn, connection):
+ if isinstance(self.col, (list, tuple)):
+ col = '%s.%s' % tuple([qn(c) for c in self.col])
+ else:
+ col = self.col
+ return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname)
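
Date and DateTime only assemble the column reference; the truncation SQL
itself comes from the backend's operations class, so the same lookup renders
differently per database. A hedged sketch, assuming PostgreSQL-style output
and a hypothetical quoter standing in for connection.ops.quote_name:

    qn = lambda name: '"%s"' % name

    d = Date(('polls_poll', 'pub_date'), 'month')
    col = '%s.%s' % tuple(qn(c) for c in d.col)
    # connection.ops.date_trunc_sql('month', col) would then yield,
    # on PostgreSQL, something like:
    #   DATE_TRUNC('month', "polls_poll"."pub_date")
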
diff --git a/lib/python2.7/site-packages/django/db/models/sql/expressions.py b/lib/python2.7/site-packages/django/db/models/sql/expressions.py
new file mode 100644
index 0000000..31e0899
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/expressions.py
@@ -0,0 +1,117 @@
+from django.core.exceptions import FieldError
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.fields import FieldDoesNotExist
+import copy
+
+class SQLEvaluator(object):
+ def __init__(self, expression, query, allow_joins=True, reuse=None):
+ self.expression = expression
+ self.opts = query.get_meta()
+ self.reuse = reuse
+ self.cols = []
+ self.expression.prepare(self, query, allow_joins)
+
+ def relabeled_clone(self, change_map):
+ clone = copy.copy(self)
+ clone.cols = []
+ for node, col in self.cols:
+ if hasattr(col, 'relabeled_clone'):
+ clone.cols.append((node, col.relabeled_clone(change_map)))
+ else:
+ clone.cols.append((node,
+ (change_map.get(col[0], col[0]), col[1])))
+ return clone
+
+ def get_cols(self):
+ cols = []
+ for node, col in self.cols:
+ if hasattr(node, 'get_cols'):
+ cols.extend(node.get_cols())
+ elif isinstance(col, tuple):
+ cols.append(col)
+ return cols
+
+ def prepare(self):
+ return self
+
+ def as_sql(self, qn, connection):
+ return self.expression.evaluate(self, qn, connection)
+
+ ######################################################
+ # Visitor methods for initial expression preparation #
+ ######################################################
+
+ def prepare_node(self, node, query, allow_joins):
+ for child in node.children:
+ if hasattr(child, 'prepare'):
+ child.prepare(self, query, allow_joins)
+
+ def prepare_leaf(self, node, query, allow_joins):
+ if not allow_joins and LOOKUP_SEP in node.name:
+ raise FieldError("Joined field references are not permitted in this query")
+
+ field_list = node.name.split(LOOKUP_SEP)
+ if node.name in query.aggregates:
+ self.cols.append((node, query.aggregate_select[node.name]))
+ else:
+ try:
+ field, sources, opts, join_list, path = query.setup_joins(
+ field_list, query.get_meta(),
+ query.get_initial_alias(), self.reuse)
+ targets, _, join_list = query.trim_joins(sources, join_list, path)
+ if self.reuse is not None:
+ self.reuse.update(join_list)
+ for t in targets:
+ self.cols.append((node, (join_list[-1], t.column)))
+ except FieldDoesNotExist:
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (self.name,
+ [f.name for f in self.opts.fields]))
+
+ ###################################################
+ # Visitor methods for final expression evaluation #
+ ###################################################
+
+ def evaluate_node(self, node, qn, connection):
+ expressions = []
+ expression_params = []
+ for child in node.children:
+ if hasattr(child, 'evaluate'):
+ sql, params = child.evaluate(self, qn, connection)
+ else:
+ sql, params = '%s', (child,)
+
+ if len(getattr(child, 'children', [])) > 1:
+ format = '(%s)'
+ else:
+ format = '%s'
+
+ if sql:
+ expressions.append(format % sql)
+ expression_params.extend(params)
+
+ return connection.ops.combine_expression(node.connector, expressions), expression_params
+
+ def evaluate_leaf(self, node, qn, connection):
+ col = None
+ for n, c in self.cols:
+ if n is node:
+ col = c
+ break
+ if col is None:
+ raise ValueError("Given node not found")
+ if hasattr(col, 'as_sql'):
+ return col.as_sql(qn, connection)
+ else:
+ return '%s.%s' % (qn(col[0]), qn(col[1])), []
+
+ def evaluate_date_modifier_node(self, node, qn, connection):
+ timedelta = node.children.pop()
+ sql, params = self.evaluate_node(node, qn, connection)
+ node.children.append(timedelta)
+
+ if timedelta.days == 0 and timedelta.seconds == 0 and \
+ timedelta.microseconds == 0:
+ return sql, params
+
+ return connection.ops.date_interval_sql(sql, node.connector, timedelta), params
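
SQLEvaluator is the bridge between F() expressions and SQL: as seen in
compiler.py above, SQLUpdateCompiler wraps any value that has an evaluate()
method in SQLEvaluator(val, query, allow_joins=False), and the visitor
methods here then walk the expression tree. A hedged sketch with a
hypothetical Product model:

    from django.db.models import F
    from myapp.models import Product  # hypothetical

    Product.objects.filter(stock__gt=0).update(stock=F('stock') - 1)
    # evaluate_node/evaluate_leaf render the wrapped expression as roughly:
    #   UPDATE "myapp_product" SET "stock" = "myapp_product"."stock" - %s
    # with params [1].
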
diff --git a/lib/python2.7/site-packages/django/db/models/sql/query.py b/lib/python2.7/site-packages/django/db/models/sql/query.py
new file mode 100644
index 0000000..7868c19
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/query.py
@@ -0,0 +1,1922 @@
+"""
+Create SQL statements for QuerySets.
+
+The code in here encapsulates all of the SQL construction so that QuerySets
+themselves do not have to (and could be backed by things other than SQL
+databases). The abstraction barrier only works one way: this module has to know
+all about the internals of models in order to get the information it needs.
+"""
+
+import copy
+
+from django.utils.datastructures import SortedDict
+from django.utils.encoding import force_text
+from django.utils.tree import Node
+from django.utils import six
+from django.db import connections, DEFAULT_DB_ALIAS
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.aggregates import refs_aggregate
+from django.db.models.expressions import ExpressionNode
+from django.db.models.fields import FieldDoesNotExist
+from django.db.models.related import PathInfo
+from django.db.models.sql import aggregates as base_aggregates_module
+from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE,
+ ORDER_PATTERN, JoinInfo, SelectInfo)
+from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
+from django.db.models.sql.expressions import SQLEvaluator
+from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
+ ExtraWhere, AND, OR, EmptyWhere)
+from django.core.exceptions import FieldError
+
+__all__ = ['Query', 'RawQuery']
+
+
+class RawQuery(object):
+ """
+ A single raw SQL query
+ """
+
+ def __init__(self, sql, using, params=None):
+ self.params = params or ()
+ self.sql = sql
+ self.using = using
+ self.cursor = None
+
+ # Mirror some properties of a normal query so that
+ # the compiler can be used to process results.
+ self.low_mark, self.high_mark = 0, None # Used for offset/limit
+ self.extra_select = {}
+ self.aggregate_select = {}
+
+ def clone(self, using):
+ return RawQuery(self.sql, using, params=self.params)
+
+ def convert_values(self, value, field, connection):
+ """Convert the database-returned value into a type that is consistent
+ across database backends.
+
+ By default, this defers to the underlying backend operations, but
+ it can be overridden by Query classes for specific backends.
+ """
+ return connection.ops.convert_values(value, field)
+
+ def get_columns(self):
+ if self.cursor is None:
+ self._execute_query()
+ converter = connections[self.using].introspection.table_name_converter
+ return [converter(column_meta[0])
+ for column_meta in self.cursor.description]
+
+ def __iter__(self):
+ # Always execute a new query for a new iterator.
+ # This could be optimized with a cache at the expense of RAM.
+ self._execute_query()
+ if not connections[self.using].features.can_use_chunked_reads:
+ # If the database can't use chunked reads we need to make sure we
+ # evaluate the entire query up front.
+ result = list(self.cursor)
+ else:
+ result = self.cursor
+ return iter(result)
+
+ def __repr__(self):
+ return "<RawQuery: %r>" % (self.sql % tuple(self.params))
+
+ def _execute_query(self):
+ self.cursor = connections[self.using].cursor()
+ self.cursor.execute(self.sql, self.params)
+
+
+class Query(object):
+ """
+ A single SQL query.
+ """
+ # SQL join types. These are part of the class because their string forms
+ # vary from database to database and can be customised by a subclass.
+ INNER = 'INNER JOIN'
+ LOUTER = 'LEFT OUTER JOIN'
+
+ alias_prefix = 'T'
+ query_terms = QUERY_TERMS
+ aggregates_module = base_aggregates_module
+
+ compiler = 'SQLCompiler'
+
+ def __init__(self, model, where=WhereNode):
+ self.model = model
+ self.alias_refcount = {}
+ # alias_map is the most important data structure regarding joins.
+ # It's used for recording which joins exist in the query and what
+ # type they are. The key is the alias of the joined table (possibly
+ # the table name) and the value is JoinInfo from constants.py.
+ self.alias_map = {}
+ self.table_map = {} # Maps table names to list of aliases.
+ self.join_map = {}
+ self.default_cols = True
+ self.default_ordering = True
+ self.standard_ordering = True
+ self.used_aliases = set()
+ self.filter_is_sticky = False
+ self.included_inherited_models = {}
+
+ # SQL-related attributes
+ # Select and related select clauses as SelectInfo instances.
+ # The select is used for cases where we want to set up the select
+ # clause to contain fields other than the default ones (values(),
+ # annotate(), subqueries...)
+ self.select = []
+ # The related_select_cols is used for columns needed for
+ # select_related - this is populated in the compile stage.
+ self.related_select_cols = []
+ self.tables = [] # Aliases in the order they are created.
+ self.where = where()
+ self.where_class = where
+ self.group_by = None
+ self.having = where()
+ self.order_by = []
+ self.low_mark, self.high_mark = 0, None # Used for offset/limit
+ self.distinct = False
+ self.distinct_fields = []
+ self.select_for_update = False
+ self.select_for_update_nowait = False
+ self.select_related = False
+
+ # SQL aggregate-related attributes
+ self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
+ self.aggregate_select_mask = None
+ self._aggregate_select_cache = None
+
+ # Arbitrary maximum limit for select_related. Prevents infinite
+ # recursion. Can be changed by the depth parameter to select_related().
+ self.max_depth = 5
+
+ # These are for extensions. The contents are more or less appended
+ # verbatim to the appropriate clause.
+ self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
+ self.extra_select_mask = None
+ self._extra_select_cache = None
+
+ self.extra_tables = ()
+ self.extra_order_by = ()
+
+ # A tuple that is a set of model field names and either True, if these
+ # are the fields to defer, or False if these are the only fields to
+ # load.
+ self.deferred_loading = (set(), True)
+
+ def __str__(self):
+ """
+ Returns the query as a string of SQL with the parameter values
+ substituted in (use sql_with_params() to see the unsubstituted string).
+
+ Parameter values won't necessarily be quoted correctly, since that is
+ done by the database interface at execution time.
+ """
+ sql, params = self.sql_with_params()
+ return sql % params
+
+ def sql_with_params(self):
+ """
+ Returns the query as an SQL string and the parameters that will be
+ substituted into the query.
+ """
+ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
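+
+ # Illustrative note: for a queryset ``qs``, ``str(qs.query)`` ends up
+ # here and returns SQL such as
+ # SELECT "app_author"."id", "app_author"."name" FROM "app_author"
+ # (column names hypothetical), while sql_with_params() returns the
+ # same string with '%s' placeholders plus the parameter tuple.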
+
+ def __deepcopy__(self, memo):
+ result = self.clone(memo=memo)
+ memo[id(self)] = result
+ return result
+
+ def prepare(self):
+ return self
+
+ def get_compiler(self, using=None, connection=None):
+ if using is None and connection is None:
+ raise ValueError("Need either using or connection")
+ if using:
+ connection = connections[using]
+
+ # Check that the compiler will be able to execute the query
+ for alias, aggregate in self.aggregate_select.items():
+ connection.ops.check_aggregate_support(aggregate)
+
+ return connection.ops.compiler(self.compiler)(self, connection, using)
+
+ def get_meta(self):
+ """
+ Returns the Options instance (the model._meta) from which to start
+ processing. Normally, this is self.model._meta, but it can be changed
+ by subclasses.
+ """
+ return self.model._meta
+
+ def clone(self, klass=None, memo=None, **kwargs):
+ """
+ Creates a copy of the current instance. The 'kwargs' parameter can be
+ used by clients to update attributes after copying has taken place.
+ """
+ obj = Empty()
+ obj.__class__ = klass or self.__class__
+ obj.model = self.model
+ obj.alias_refcount = self.alias_refcount.copy()
+ obj.alias_map = self.alias_map.copy()
+ obj.table_map = self.table_map.copy()
+ obj.join_map = self.join_map.copy()
+ obj.default_cols = self.default_cols
+ obj.default_ordering = self.default_ordering
+ obj.standard_ordering = self.standard_ordering
+ obj.included_inherited_models = self.included_inherited_models.copy()
+ obj.select = self.select[:]
+ obj.related_select_cols = []
+ obj.tables = self.tables[:]
+ obj.where = self.where.clone()
+ obj.where_class = self.where_class
+ if self.group_by is None:
+ obj.group_by = None
+ else:
+ obj.group_by = self.group_by[:]
+ obj.having = self.having.clone()
+ obj.order_by = self.order_by[:]
+ obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
+ obj.distinct = self.distinct
+ obj.distinct_fields = self.distinct_fields[:]
+ obj.select_for_update = self.select_for_update
+ obj.select_for_update_nowait = self.select_for_update_nowait
+ obj.select_related = self.select_related
+ obj.related_select_cols = []
+ obj.aggregates = self.aggregates.copy()
+ if self.aggregate_select_mask is None:
+ obj.aggregate_select_mask = None
+ else:
+ obj.aggregate_select_mask = self.aggregate_select_mask.copy()
+ # _aggregate_select_cache cannot be copied, as doing so breaks the
+ # (necessary) state in which both aggregates and
+ # _aggregate_select_cache point to the same underlying objects.
+ # It will get re-populated in the cloned queryset the next time it's
+ # used.
+ obj._aggregate_select_cache = None
+ obj.max_depth = self.max_depth
+ obj.extra = self.extra.copy()
+ if self.extra_select_mask is None:
+ obj.extra_select_mask = None
+ else:
+ obj.extra_select_mask = self.extra_select_mask.copy()
+ if self._extra_select_cache is None:
+ obj._extra_select_cache = None
+ else:
+ obj._extra_select_cache = self._extra_select_cache.copy()
+ obj.extra_tables = self.extra_tables
+ obj.extra_order_by = self.extra_order_by
+ obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1]
+ if self.filter_is_sticky and self.used_aliases:
+ obj.used_aliases = self.used_aliases.copy()
+ else:
+ obj.used_aliases = set()
+ obj.filter_is_sticky = False
+
+ obj.__dict__.update(kwargs)
+ if hasattr(obj, '_setup_query'):
+ obj._setup_query()
+ return obj
+
+ def convert_values(self, value, field, connection):
+ """Convert the database-returned value into a type that is consistent
+ across database backends.
+
+ By default, this defers to the underlying backend operations, but
+ it can be overridden by Query classes for specific backends.
+ """
+ return connection.ops.convert_values(value, field)
+
+ def resolve_aggregate(self, value, aggregate, connection):
+ """Resolve the value of aggregates returned by the database to
+ consistent (and reasonable) types.
+
+ This is required because of the predisposition of certain backends
+ to return Decimal and long types when they are not needed.
+ """
+ if value is None:
+ if aggregate.is_ordinal:
+ return 0
+ # Return None as-is
+ return value
+ elif aggregate.is_ordinal:
+ # Any ordinal aggregate (e.g., count) returns an int
+ return int(value)
+ elif aggregate.is_computed:
+ # Any computed aggregate (e.g., avg) returns a float
+ return float(value)
+ else:
+ # Return value depends on the type of the field being processed.
+ return self.convert_values(value, aggregate.field, connection)
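+
+ # Illustrative examples: Count is ordinal, so a NULL result resolves
+ # to 0 and a backend long such as 3L resolves to int(3); Avg is
+ # computed, so a backend Decimal('2.5') resolves to the float 2.5.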
+
+ def get_aggregation(self, using):
+ """
+ Returns the dictionary with the values of the existing aggregations.
+ """
+ if not self.aggregate_select:
+ return {}
+
+ # If there is a group by clause, aggregating does not add useful
+ # information but retrieves only the first row. Aggregate
+ # over the subquery instead.
+ if self.group_by is not None:
+ from django.db.models.sql.subqueries import AggregateQuery
+ query = AggregateQuery(self.model)
+
+ obj = self.clone()
+
+ # Remove any aggregates marked for reduction from the subquery
+ # and move them to the outer AggregateQuery.
+ for alias, aggregate in self.aggregate_select.items():
+ if aggregate.is_summary:
+ query.aggregate_select[alias] = aggregate
+ del obj.aggregate_select[alias]
+
+ try:
+ query.add_subquery(obj, using)
+ except EmptyResultSet:
+ return dict(
+ (alias, None)
+ for alias in query.aggregate_select
+ )
+ else:
+ query = self
+ self.select = []
+ self.default_cols = False
+ self.extra = {}
+ self.remove_inherited_models()
+
+ query.clear_ordering(True)
+ query.clear_limits()
+ query.select_for_update = False
+ query.select_related = False
+ query.related_select_cols = []
+
+ result = query.get_compiler(using).execute_sql(SINGLE)
+ if result is None:
+ result = [None for q in query.aggregate_select.items()]
+
+ return dict([
+ (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
+ for (alias, aggregate), val
+ in zip(query.aggregate_select.items(), result)
+ ])
+
+ def get_count(self, using):
+ """
+ Performs a COUNT() query using the current filter constraints.
+ """
+ obj = self.clone()
+ if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
+ # If a select clause exists, then the query has already started to
+ # specify the columns that are to be returned.
+ # In this case, we need to use a subquery to evaluate the count.
+ from django.db.models.sql.subqueries import AggregateQuery
+ subquery = obj
+ subquery.clear_ordering(True)
+ subquery.clear_limits()
+
+ obj = AggregateQuery(obj.model)
+ try:
+ obj.add_subquery(subquery, using=using)
+ except EmptyResultSet:
+ # add_subquery evaluates the query; if it raises EmptyResultSet
+ # then there can be no results, and therefore the
+ # count is obviously 0
+ return 0
+
+ obj.add_count_column()
+ number = obj.get_aggregation(using=using)[None]
+
+ # Apply offset and limit constraints manually, since using LIMIT/OFFSET
+ # in SQL (in variants that provide them) doesn't change the COUNT
+ # output.
+ number = max(0, number - self.low_mark)
+ if self.high_mark is not None:
+ number = min(number, self.high_mark - self.low_mark)
+
+ return number
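+
+ # Illustrative arithmetic: with 50 matching rows and a slice
+ # qs[45:55] (low_mark=45, high_mark=55), the raw COUNT() is still 50,
+ # so the returned value is min(max(0, 50 - 45), 55 - 45) = 5.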
+
+ def has_results(self, using):
+ q = self.clone()
+ q.clear_select_clause()
+ q.add_extra({'a': 1}, None, None, None, None, None)
+ q.set_extra_mask(['a'])
+ q.clear_ordering(True)
+ q.set_limits(high=1)
+ compiler = q.get_compiler(using=using)
+ return bool(compiler.execute_sql(SINGLE))
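+
+ # Illustrative note: QuerySet.exists() relies on this method; the
+ # emitted SQL is roughly
+ # SELECT (1) AS "a" FROM ... WHERE ... LIMIT 1
+ # and the result is whether a single row came back.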
+
+ def combine(self, rhs, connector):
+ """
+ Merge the 'rhs' query into the current one, with any 'rhs' effects
+ being applied *after* (that is, "to the right of") anything in the
+ current query. 'rhs' is not modified during a call to this function.
+
+ The 'connector' parameter describes how to connect filters from the
+ 'rhs' query.
+ """
+ assert self.model == rhs.model, \
+ "Cannot combine queries on two different base models."
+ assert self.can_filter(), \
+ "Cannot combine queries once a slice has been taken."
+ assert self.distinct == rhs.distinct, \
+ "Cannot combine a unique query with a non-unique query."
+ assert self.distinct_fields == rhs.distinct_fields, \
+ "Cannot combine queries with different distinct fields."
+
+ self.remove_inherited_models()
+ # Work out how to relabel the rhs aliases, if necessary.
+ change_map = {}
+ conjunction = (connector == AND)
+
+ # Determine which existing joins can be reused. When combining the
+ # query with AND we must recreate all joins for m2m filters. When
+ # combining with OR we can reuse joins. The reason is that in AND
+ # case a single row can't fulfill a condition like:
+ # revrel__col=1 & revrel__col=2
+ # But, there might be two different related rows matching this
+ # condition. In OR case a single True is enough, so single row is
+ # enough, too.
+ #
+ # Note that we will be creating duplicate joins for non-m2m joins in
+ # the AND case. The results will be correct but this creates too many
+ # joins. This is something that could be fixed later on.
+ reuse = set() if conjunction else set(self.tables)
+ # Base table must be present in the query - this is the same
+ # table on both sides.
+ self.get_initial_alias()
+ # Now, add the joins from rhs query into the new query (skipping base
+ # table).
+ for alias in rhs.tables[1:]:
+ table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias]
+ promote = (join_type == self.LOUTER)
+ # If the left side of the join was already relabeled, use the
+ # updated alias.
+ lhs = change_map.get(lhs, lhs)
+ new_alias = self.join(
+ (lhs, table, join_cols), reuse=reuse,
+ outer_if_first=not conjunction, nullable=nullable,
+ join_field=join_field)
+ if promote:
+ self.promote_joins([new_alias])
+ # We can't reuse the same join again in the query. If we have two
+ # distinct joins for the same connection in rhs query, then the
+ # combined query must have two joins, too.
+ reuse.discard(new_alias)
+ change_map[alias] = new_alias
+ if not rhs.alias_refcount[alias]:
+ # The alias was unused in the rhs query. Unref it so that it
+ # will be unused in the new query, too. We have to add and
+ # unref the alias so that join promotion has information about
+ # the join type for the unused alias.
+ self.unref_alias(new_alias)
+
+ # So that we don't exclude valid results in an OR query combination,
+ # all joins exclusive to either the lhs or the rhs must be converted
+ # to an outer join. RHS joins were already set to outer joins above,
+ # so check which joins were used only in the lhs query.
+ if not conjunction:
+ rhs_used_joins = set(change_map.values())
+ to_promote = [alias for alias in self.tables
+ if alias not in rhs_used_joins]
+ self.promote_joins(to_promote, True)
+
+ # Now relabel a copy of the rhs where-clause and add it to the current
+ # one.
+ if rhs.where:
+ w = rhs.where.clone()
+ w.relabel_aliases(change_map)
+ if not self.where:
+ # Since 'self' matches everything, add an explicit "include
+ # everything" where-constraint so that connections between the
+ # where clauses won't exclude valid results.
+ self.where.add(EverythingNode(), AND)
+ elif self.where:
+ # rhs has an empty where clause.
+ w = self.where_class()
+ w.add(EverythingNode(), AND)
+ else:
+ w = self.where_class()
+ self.where.add(w, connector)
+
+ # Selection columns and extra extensions are those provided by 'rhs'.
+ self.select = []
+ for col, field in rhs.select:
+ if isinstance(col, (list, tuple)):
+ new_col = change_map.get(col[0], col[0]), col[1]
+ self.select.append(SelectInfo(new_col, field))
+ else:
+ new_col = col.relabeled_clone(change_map)
+ self.select.append(SelectInfo(new_col, field))
+
+ if connector == OR:
+ # It would be nice to be able to handle this, but the queries don't
+ # really make sense (or return consistent value sets). Not worth
+ # the extra complexity when you can write a real query instead.
+ if self.extra and rhs.extra:
+ raise ValueError("When merging querysets using 'or', you "
+ "cannot have extra(select=...) on both sides.")
+ self.extra.update(rhs.extra)
+ extra_select_mask = set()
+ if self.extra_select_mask is not None:
+ extra_select_mask.update(self.extra_select_mask)
+ if rhs.extra_select_mask is not None:
+ extra_select_mask.update(rhs.extra_select_mask)
+ if extra_select_mask:
+ self.set_extra_mask(extra_select_mask)
+ self.extra_tables += rhs.extra_tables
+
+ # Ordering uses the 'rhs' ordering, unless it has none, in which case
+ # the current ordering is used.
+ self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
+ self.extra_order_by = rhs.extra_order_by or self.extra_order_by
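+
+ # Illustrative note: combining querysets funnels through here --
+ # (qs1 | qs2) calls qs1.query.combine(qs2.query, OR), and joins used
+ # on only one side are promoted to LEFT OUTER so that rows matching
+ # just the other side are not excluded.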
+
+ def deferred_to_data(self, target, callback):
+ """
+ Converts the self.deferred_loading data structure to an alternate data
+ structure, describing the fields that *will* be loaded. This is used to
+ compute the columns to select from the database and also by the
+ QuerySet class to work out which fields are being initialised on each
+ model. Models that have all their fields included aren't mentioned in
+ the result, only those that have field restrictions in place.
+
+ The "target" parameter is the instance that is populated (in place).
+ The "callback" is a function that is called whenever a (model, field)
+ pair needs to be added to "target". It accepts three parameters:
+ "target", and the model and list of fields being added for that model.
+ """
+ field_names, defer = self.deferred_loading
+ if not field_names:
+ return
+ orig_opts = self.get_meta()
+ seen = {}
+ must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
+ for field_name in field_names:
+ parts = field_name.split(LOOKUP_SEP)
+ cur_model = self.model
+ opts = orig_opts
+ for name in parts[:-1]:
+ old_model = cur_model
+ source = opts.get_field_by_name(name)[0]
+ if is_reverse_o2o(source):
+ cur_model = source.model
+ else:
+ cur_model = source.rel.to
+ opts = cur_model._meta
+ # Even if we're "just passing through" this model, we must add
+ # both the current model's pk and the related reference field
+ # (if it's not a reverse relation) to the things we select.
+ if not is_reverse_o2o(source):
+ must_include[old_model].add(source)
+ add_to_dict(must_include, cur_model, opts.pk)
+ field, model, _, _ = opts.get_field_by_name(parts[-1])
+ if model is None:
+ model = cur_model
+ if not is_reverse_o2o(field):
+ add_to_dict(seen, model, field)
+
+ if defer:
+ # We need to load all fields for each model, except those that
+ # appear in "seen" (for all models that appear in "seen"). The only
+ # slight complexity here is handling fields that exist on parent
+ # models.
+ workset = {}
+ for model, values in six.iteritems(seen):
+ for field, m in model._meta.get_fields_with_model():
+ if field in values:
+ continue
+ add_to_dict(workset, m or model, field)
+ for model, values in six.iteritems(must_include):
+ # If we haven't included a model in workset, we don't add the
+ # corresponding must_include fields for that model, since an
+ # empty set means "include all fields". That's why there's no
+ # "else" branch here.
+ if model in workset:
+ workset[model].update(values)
+ for model, values in six.iteritems(workset):
+ callback(target, model, values)
+ else:
+ for model, values in six.iteritems(must_include):
+ if model in seen:
+ seen[model].update(values)
+ else:
+ # As we've passed through this model, but not explicitly
+ # included any fields, we have to make sure it's mentioned
+ # so that only the "must include" fields are pulled in.
+ seen[model] = values
+ # Now ensure that every model in the inheritance chain is mentioned
+ # in the parent list. Again, it must be mentioned to ensure that
+ # only "must include" fields are pulled in.
+ for model in orig_opts.get_parent_list():
+ if model not in seen:
+ seen[model] = set()
+ for model, values in six.iteritems(seen):
+ callback(target, model, values)
+
+ def deferred_to_columns_cb(self, target, model, fields):
+ """
+ Callback used by deferred_to_columns(). The "target" parameter should
+ be a set instance.
+ """
+ table = model._meta.db_table
+ if table not in target:
+ target[table] = set()
+ for field in fields:
+ target[table].add(field.column)
+
+ def table_alias(self, table_name, create=False):
+ """
+ Returns a table alias for the given table_name and whether this is a
+ new alias or not.
+
+ If 'create' is true, a new alias is always created. Otherwise, the
+ first-created alias for the table (if one exists) is reused.
+ """
+ current = self.table_map.get(table_name)
+ if not create and current:
+ alias = current[0]
+ self.alias_refcount[alias] += 1
+ return alias, False
+
+ # Create a new alias for this table.
+ if current:
+ alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
+ current.append(alias)
+ else:
+ # The first occurrence of a table uses the table name directly.
+ alias = table_name
+ self.table_map[alias] = [alias]
+ self.alias_refcount[alias] = 1
+ self.tables.append(alias)
+ return alias, True
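+
+ # Illustrative example (table name hypothetical): the first request
+ # for 'app_book' returns ('app_book', True) -- the table name itself
+ # serves as the alias; a later call with create=True returns a
+ # generated alias such as ('T2', True), numbered from the current
+ # size of alias_map.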
+
+ def ref_alias(self, alias):
+ """ Increases the reference count for this alias. """
+ self.alias_refcount[alias] += 1
+
+ def unref_alias(self, alias, amount=1):
+ """ Decreases the reference count for this alias. """
+ self.alias_refcount[alias] -= amount
+
+ def promote_joins(self, aliases, unconditional=False):
+ """
+ Recursively promotes the join type of the given aliases and their
+ children to an outer join. If 'unconditional' is False, a join is only
+ promoted if it is nullable or the parent join is an outer join.
+
+ Note about join promotion: When promoting any alias, we make sure all
+ joins which start from that alias are promoted, too. When adding a join
+ in join(), we make sure any join added to an already existing LOUTER
+ join is generated as LOUTER. This ensures we never have broken join
+ chains that contain first a LOUTER join and then an INNER join; that
+ is, a chain like "a LOUTER b INNER c" should never be generated. The
+ reason for avoiding this type of join chain is that the INNER after
+ the LOUTER will effectively remove any effect the LOUTER had.
+ """
+ aliases = list(aliases)
+ while aliases:
+ alias = aliases.pop(0)
+ if self.alias_map[alias].join_cols[0][1] is None:
+ # This is the base table (first FROM entry) - this table
+ # isn't really joined at all in the query, so we should not
+ # alter its join type.
+ continue
+ parent_alias = self.alias_map[alias].lhs_alias
+ parent_louter = (parent_alias
+ and self.alias_map[parent_alias].join_type == self.LOUTER)
+ already_louter = self.alias_map[alias].join_type == self.LOUTER
+ if ((unconditional or self.alias_map[alias].nullable
+ or parent_louter) and not already_louter):
+ data = self.alias_map[alias]._replace(join_type=self.LOUTER)
+ self.alias_map[alias] = data
+ # Join type of 'alias' changed, so re-examine all aliases that
+ # refer to this one.
+ aliases.extend(
+ join for join in self.alias_map.keys()
+ if (self.alias_map[join].lhs_alias == alias
+ and join not in aliases))
+
+ def reset_refcounts(self, to_counts):
+ """
+ Resets reference counts for aliases so that they match the value
+ passed in 'to_counts'.
+ """
+ for alias, cur_refcount in self.alias_refcount.copy().items():
+ unref_amount = cur_refcount - to_counts.get(alias, 0)
+ self.unref_alias(alias, unref_amount)
+
+ def promote_disjunction(self, aliases_before, alias_usage_counts,
+ num_childs):
+ """
+ This method is to be used for promoting joins in ORed filters.
+
+ The principle for promotion is: any alias which is used (that is, it
+ appears in alias_usage_counts), is not used by every child of the ORed
+ filter, and isn't pre-existing must be promoted to a LOUTER join.
+ """
+ for alias, use_count in alias_usage_counts.items():
+ if use_count < num_childs and alias not in aliases_before:
+ self.promote_joins([alias])
+
+ def change_aliases(self, change_map):
+ """
+ Changes the aliases in change_map (which maps old-alias -> new-alias),
+ relabelling any references to them in select columns and the where
+ clause.
+ """
+ assert set(change_map.keys()).intersection(set(change_map.values())) == set()
+
+ def relabel_column(col):
+ if isinstance(col, (list, tuple)):
+ old_alias = col[0]
+ return (change_map.get(old_alias, old_alias), col[1])
+ else:
+ return col.relabeled_clone(change_map)
+ # 1. Update references in "select" (normal columns plus aliases),
+ # "group by", "where" and "having".
+ self.where.relabel_aliases(change_map)
+ self.having.relabel_aliases(change_map)
+ if self.group_by:
+ self.group_by = [relabel_column(col) for col in self.group_by]
+ self.select = [SelectInfo(relabel_column(s.col), s.field)
+ for s in self.select]
+ self.aggregates = SortedDict(
+ (key, relabel_column(col)) for key, col in self.aggregates.items())
+
+ # 2. Rename the alias in the internal table/alias datastructures.
+ for ident, aliases in self.join_map.items():
+ del self.join_map[ident]
+ aliases = tuple([change_map.get(a, a) for a in aliases])
+ ident = (change_map.get(ident[0], ident[0]),) + ident[1:]
+ self.join_map[ident] = aliases
+ for old_alias, new_alias in six.iteritems(change_map):
+ alias_data = self.alias_map[old_alias]
+ alias_data = alias_data._replace(rhs_alias=new_alias)
+ self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
+ del self.alias_refcount[old_alias]
+ self.alias_map[new_alias] = alias_data
+ del self.alias_map[old_alias]
+
+ table_aliases = self.table_map[alias_data.table_name]
+ for pos, alias in enumerate(table_aliases):
+ if alias == old_alias:
+ table_aliases[pos] = new_alias
+ break
+ for pos, alias in enumerate(self.tables):
+ if alias == old_alias:
+ self.tables[pos] = new_alias
+ break
+ for key, alias in self.included_inherited_models.items():
+ if alias in change_map:
+ self.included_inherited_models[key] = change_map[alias]
+
+ # 3. Update any joins that refer to the old alias.
+ for alias, data in six.iteritems(self.alias_map):
+ lhs = data.lhs_alias
+ if lhs in change_map:
+ data = data._replace(lhs_alias=change_map[lhs])
+ self.alias_map[alias] = data
+
+ def bump_prefix(self, exceptions=()):
+ """
+ Changes the alias prefix to the next letter in the alphabet and
+ relabels all the aliases. Even tables that previously had no alias will
+ get an alias after this call (it's mostly used for nested queries and
+ the outer query will already be using the non-aliased table name).
+
+ Subclasses that create their own prefix should override this method to
+ produce a similar result (a new prefix and relabelled aliases).
+
+ The 'exceptions' parameter is a container that holds alias names which
+ should not be changed.
+ """
+ current = ord(self.alias_prefix)
+ assert current < ord('Z')
+ prefix = chr(current + 1)
+ self.alias_prefix = prefix
+ change_map = SortedDict()
+ for pos, alias in enumerate(self.tables):
+ if alias in exceptions:
+ continue
+ new_alias = '%s%d' % (prefix, pos)
+ change_map[alias] = new_alias
+ self.tables[pos] = new_alias
+ self.change_aliases(change_map)
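+
+ # Illustrative example: with alias_prefix 'T' and tables
+ # ['app_book', 'T2'] (names hypothetical), bump_prefix() sets the
+ # prefix to 'U' and relabels both aliases to 'U0' and 'U1' via
+ # change_aliases().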
+
+ def get_initial_alias(self):
+ """
+ Returns the first alias for this query, after increasing its reference
+ count.
+ """
+ if self.tables:
+ alias = self.tables[0]
+ self.ref_alias(alias)
+ else:
+ alias = self.join((None, self.get_meta().db_table, None))
+ return alias
+
+ def count_active_tables(self):
+ """
+ Returns the number of tables in this query with a non-zero reference
+ count. Note that after execution, the reference counts are zeroed, so
+ tables added in compiler will not be seen by this method.
+ """
+ return len([1 for count in self.alias_refcount.values() if count])
+
+ def join(self, connection, reuse=None, outer_if_first=False,
+ nullable=False, join_field=None):
+ """
+ Returns an alias for the join in 'connection', either reusing an
+ existing alias for that join or creating a new one. 'connection' is a
+ tuple (lhs, table, join_cols) where 'lhs' is either an existing
+ table alias or a table name. 'join_cols' is a tuple of tuples containing
+ columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds
+ to the SQL equivalent of::
+
+ lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2
+
+ The 'reuse' parameter can be either None which means all joins
+ (matching the connection) are reusable, or it can be a set containing
+ the aliases that can be reused.
+
+ If 'outer_if_first' is True and a new join is created, it will have the
+ LOUTER join type.
+
+ A join is always created as LOUTER if the lhs alias is LOUTER to make
+ sure we do not generate chains like t1 LOUTER t2 INNER t3.
+
+ If 'nullable' is True, the join can potentially involve NULL values and
+ is a candidate for promotion (to "left outer") when combining querysets.
+
+ The 'join_field' is the field we are joining along (if any).
+ """
+ lhs, table, join_cols = connection
+ assert lhs is None or join_field is not None
+ existing = self.join_map.get(connection, ())
+ if reuse is None:
+ reuse = existing
+ else:
+ reuse = [a for a in existing if a in reuse]
+ for alias in reuse:
+ if join_field and self.alias_map[alias].join_field != join_field:
+ # The join_map doesn't contain join_field (mainly because
+ # fields in Query structs are problematic in pickling), so
+ # check that the existing join was created using the same
+ # join_field as the join being constructed.
+ continue
+ self.ref_alias(alias)
+ return alias
+
+ # No reuse is possible, so we need a new alias.
+ alias, _ = self.table_alias(table, True)
+ if not lhs:
+ # Not all tables need to be joined to anything. No join type
+ # means the later columns are ignored.
+ join_type = None
+ elif outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
+ # We need to use LOUTER join if asked by outer_if_first or if the
+ # LHS table is left-joined in the query.
+ join_type = self.LOUTER
+ else:
+ join_type = self.INNER
+ join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
+ join_field)
+ self.alias_map[alias] = join
+ if connection in self.join_map:
+ self.join_map[connection] += (alias,)
+ else:
+ self.join_map[connection] = (alias,)
+ return alias
+
+ def setup_inherited_models(self):
+ """
+ If the model that is the basis for this QuerySet inherits other models,
+ we need to ensure that those other models have their tables included in
+ the query.
+
+ We do this as a separate step so that subclasses know which
+ tables are going to be active in the query, without needing to compute
+ all the select columns (this method is called from pre_sql_setup(),
+ whereas column determination is a later part, and side-effect, of
+ as_sql()).
+ """
+ opts = self.get_meta()
+ root_alias = self.tables[0]
+ seen = {None: root_alias}
+
+ for field, model in opts.get_fields_with_model():
+ if model not in seen:
+ self.join_parent_model(opts, model, root_alias, seen)
+ self.included_inherited_models = seen
+
+ def join_parent_model(self, opts, model, alias, seen):
+ """
+ Makes sure the given 'model' is joined in the query. If 'model' isn't
+ a parent of 'opts' or if it is None this method is a no-op.
+
+ The 'alias' is the root alias for starting the join, 'seen' is a dict
+ of model -> alias of existing joins. It must also contain a mapping
+ of None -> some alias. This will be returned in the no-op case.
+ """
+ if model in seen:
+ return seen[model]
+ chain = opts.get_base_chain(model)
+ if chain is None:
+ return alias
+ curr_opts = opts
+ for int_model in chain:
+ if int_model in seen:
+ return seen[int_model]
+ # Proxy models have elements in the base chain with no
+ # parents; assign the new options object and skip to the
+ # next base in that case.
+ if not curr_opts.parents[int_model]:
+ curr_opts = int_model._meta
+ continue
+ link_field = curr_opts.get_ancestor_link(int_model)
+ _, _, _, joins, _ = self.setup_joins(
+ [link_field.name], curr_opts, alias)
+ curr_opts = int_model._meta
+ alias = seen[int_model] = joins[-1]
+ return alias or seen[None]
+
+ def remove_inherited_models(self):
+ """
+ Undoes the effects of setup_inherited_models(). Should be called
+ whenever select columns (self.select) are set explicitly.
+ """
+ for key, alias in self.included_inherited_models.items():
+ if key:
+ self.unref_alias(alias)
+ self.included_inherited_models = {}
+
+ def add_aggregate(self, aggregate, model, alias, is_summary):
+ """
+ Adds a single aggregate expression to the Query
+ """
+ opts = model._meta
+ field_list = aggregate.lookup.split(LOOKUP_SEP)
+ if len(field_list) == 1 and aggregate.lookup in self.aggregates:
+ # Aggregate is over an annotation
+ field_name = field_list[0]
+ col = field_name
+ source = self.aggregates[field_name]
+ if not is_summary:
+ raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
+ aggregate.name, field_name, field_name))
+ elif ((len(field_list) > 1) or
+ (field_list[0] not in [i.name for i in opts.fields]) or
+ self.group_by is None or
+ not is_summary):
+ # If:
+ # - the field descriptor has more than one part (foo__bar), or
+ # - the field descriptor is referencing an m2m/m2o field, or
+ # - this is a reference to a model field (possibly inherited), or
+ # - this is an annotation over a model field
+ # then we need to explore the joins that are required.
+
+ field, sources, opts, join_list, path = self.setup_joins(
+ field_list, opts, self.get_initial_alias())
+
+ # Process the join chain to see if it can be trimmed
+ targets, _, join_list = self.trim_joins(sources, join_list, path)
+
+ # If the aggregate references a model or field that requires a join,
+ # those joins must be LEFT OUTER - empty join rows must be returned
+ # in order for zeros to be returned for those aggregates.
+ self.promote_joins(join_list, True)
+
+ col = targets[0].column
+ source = sources[0]
+ col = (join_list[-1], col)
+ else:
+ # The simplest cases. No joins required -
+ # just reference the provided column alias.
+ field_name = field_list[0]
+ source = opts.get_field(field_name)
+ col = field_name
+
+ # Add the aggregate to the query
+ aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
+
+ def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
+ can_reuse=None):
+ """
+ Builds a WhereNode for a single filter clause, but doesn't add it
+ to this Query. Query.add_q() will then add this filter to the where
+ or having Node.
+
+ The 'branch_negated' tells us if the current branch contains any
+ negations. This will be used to determine if subqueries are needed.
+
+ The 'current_negated' is used to determine if the current filter is
+ negated or not and this will be used to determine if IS NULL filtering
+ is needed.
+
+ The difference between current_negated and branch_negated is that
+ branch_negated is set on first negation, but current_negated is
+ flipped for each negation.
+
+ Note that this method will not do any negating itself; that is
+ done higher up in the code by add_q().
+
+ The 'can_reuse' is a set of reusable joins for multijoins.
+
+ The method will create a filter clause that can be added to the current
+ query. However, if the filter isn't added to the query then the caller
+ is responsible for unreffing the joins used.
+ """
+ arg, value = filter_expr
+ parts = arg.split(LOOKUP_SEP)
+ if not parts:
+ raise FieldError("Cannot parse keyword query %r" % arg)
+
+ # Work out the lookup type and remove it from the end of 'parts',
+ # if necessary.
+ lookup_type = 'exact' # Default lookup type
+ num_parts = len(parts)
+ if (len(parts) > 1 and parts[-1] in self.query_terms
+ and arg not in self.aggregates):
+ # Traverse the lookup query to distinguish related fields from
+ # lookup types.
+ lookup_model = self.model
+ for counter, field_name in enumerate(parts):
+ try:
+ lookup_field = lookup_model._meta.get_field(field_name)
+ except FieldDoesNotExist:
+ # Not a field. Bail out.
+ lookup_type = parts.pop()
+ break
+ # Unless we're at the end of the list of lookups, let's attempt
+ # to continue traversing relations.
+ if (counter + 1) < num_parts:
+ try:
+ lookup_model = lookup_field.rel.to
+ except AttributeError:
+ # Not a related field. Bail out.
+ lookup_type = parts.pop()
+ break
+
+ clause = self.where_class()
+ # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
+ # uses of None as a query value.
+ if value is None:
+ if lookup_type != 'exact':
+ raise ValueError("Cannot use None as a query value")
+ lookup_type = 'isnull'
+ value = True
+ elif callable(value):
+ value = value()
+ elif isinstance(value, ExpressionNode):
+ # If value is a query expression, evaluate it
+ value = SQLEvaluator(value, self, reuse=can_reuse)
+ # For Oracle '' is equivalent to null. The check needs to be done
+ # at this stage because join promotion can't be done at compiler
+ # stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
+ # can do here. Similar thing is done in is_nullable(), too.
+ if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
+ lookup_type == 'exact' and value == ''):
+ value = True
+ lookup_type = 'isnull'
+
+ for alias, aggregate in self.aggregates.items():
+ if alias in (parts[0], LOOKUP_SEP.join(parts)):
+ clause.add((aggregate, lookup_type, value), AND)
+ return clause
+
+ opts = self.get_meta()
+ alias = self.get_initial_alias()
+ allow_many = not branch_negated
+
+ try:
+ field, sources, opts, join_list, path = self.setup_joins(
+ parts, opts, alias, can_reuse, allow_many,
+ allow_explicit_fk=True)
+ if can_reuse is not None:
+ can_reuse.update(join_list)
+ except MultiJoin as e:
+ return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
+ can_reuse, e.names_with_path)
+
+ if (lookup_type == 'isnull' and value is True and not current_negated and
+ len(join_list) > 1):
+ # If the comparison is against NULL, we may need to use some left
+ # outer joins when creating the join chain. This is only done when
+ # needed, as it's less efficient at the database level.
+ self.promote_joins(join_list)
+
+ # Process the join list to see if we can remove any inner joins from
+ # the far end (fewer tables in a query is better). Note that join
+ # promotion must happen before join trimming to have the join type
+ # information available when reusing joins.
+ targets, alias, join_list = self.trim_joins(sources, join_list, path)
+
+ if hasattr(field, 'get_lookup_constraint'):
+ constraint = field.get_lookup_constraint(self.where_class, alias, targets, sources,
+ lookup_type, value)
+ else:
+ constraint = (Constraint(alias, targets[0].column, field), lookup_type, value)
+ clause.add(constraint, AND)
+ if current_negated and (lookup_type != 'isnull' or value is False):
+ self.promote_joins(join_list)
+ if (lookup_type != 'isnull' and (
+ self.is_nullable(targets[0]) or
+ self.alias_map[join_list[-1]].join_type == self.LOUTER)):
+ # The condition added here will be SQL like this:
+ # NOT (col IS NOT NULL), where the first NOT is added in
+ # upper layers of code. The reason for addition is that if col
+ # is null, then col != someval will result in SQL "unknown"
+ # which isn't the same as in Python. The Python None handling
+ # is wanted, and it can be gotten by
+ # (col IS NULL OR col != someval)
+ # <=>
+ # NOT (col IS NOT NULL AND col = someval).
+ clause.add((Constraint(alias, targets[0].column, None), 'isnull', False), AND)
+ return clause
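+
+ # Illustrative examples (field names hypothetical):
+ # build_filter(('author__name__icontains', 'x')) pops the trailing
+ # 'icontains' as the lookup type, sets up the join to the author
+ # table and returns a WhereNode for the condition, while
+ # ('published__exact', None) is rewritten to the 'isnull' lookup
+ # with value True.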
+
+ def add_filter(self, filter_clause):
+ self.where.add(self.build_filter(filter_clause), 'AND')
+
+ def need_having(self, obj):
+ """
+ Returns whether or not all elements of this q_object need to be put
+ together in the HAVING clause.
+ """
+ if not isinstance(obj, Node):
+ return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)
+ or (hasattr(obj[1], 'contains_aggregate')
+ and obj[1].contains_aggregate(self.aggregates)))
+ return any(self.need_having(c) for c in obj.children)
+
+ def split_having_parts(self, q_object, negated=False):
+ """
+ Returns a list of q_objects which need to go into the having clause
+ instead of the where clause. Removes the split-out nodes from the
+ given q_object. Note that the q_object is altered, so cloning it is
+ needed.
+ """
+ having_parts = []
+ for c in q_object.children[:]:
+ # When constructing the having nodes we need to take care to
+ # preserve the negation status from the upper parts of the tree
+ if isinstance(c, Node):
+ # For each negated child, flip the in_negated flag.
+ in_negated = c.negated ^ negated
+ if c.connector == OR and self.need_having(c):
+ # A subtree starting from an OR clause must go into HAVING
+ # as a whole if any part of that tree references an aggregate.
+ q_object.children.remove(c)
+ having_parts.append(c)
+ c.negated = in_negated
+ else:
+ having_parts.extend(
+ self.split_having_parts(c, in_negated)[1])
+ elif self.need_having(c):
+ q_object.children.remove(c)
+ new_q = self.where_class(children=[c], negated=negated)
+ having_parts.append(new_q)
+ return q_object, having_parts
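+
+ # Illustrative example: assuming an annotation alias 'num_books', for
+ # q = Q(name='x') & Q(Q(num_books__gt=2) | Q(pages__gt=100))
+ # the inner OR node references an aggregate, so that whole subtree is
+ # moved to having_parts while Q(name='x') stays in the WHERE part.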
+
+ def add_q(self, q_object):
+ """
+ A preprocessor for the internal _add_q(). Responsible for
+ splitting the given q_object into where and having parts and
+ setting up some internal variables.
+ """
+ if not self.need_having(q_object):
+ where_part, having_parts = q_object, []
+ else:
+ where_part, having_parts = self.split_having_parts(
+ q_object.clone(), q_object.negated)
+ used_aliases = self.used_aliases
+ clause = self._add_q(where_part, used_aliases)
+ self.where.add(clause, AND)
+ for hp in having_parts:
+ clause = self._add_q(hp, used_aliases)
+ self.having.add(clause, AND)
+ if self.filter_is_sticky:
+ self.used_aliases = used_aliases
+
+ def _add_q(self, q_object, used_aliases, branch_negated=False,
+ current_negated=False):
+ """
+ Adds a Q-object to the current filter.
+ """
+ connector = q_object.connector
+ current_negated = current_negated ^ q_object.negated
+ branch_negated = branch_negated or q_object.negated
+ target_clause = self.where_class(connector=connector,
+ negated=q_object.negated)
+ # Treat case NOT (a AND b) like case ((NOT a) OR (NOT b)) for join
+ # promotion. See ticket #21748.
+ effective_connector = connector
+ if current_negated:
+ effective_connector = OR if effective_connector == AND else AND
+ if effective_connector == OR:
+ alias_usage_counts = dict()
+ aliases_before = set(self.tables)
+ for child in q_object.children:
+ if effective_connector == OR:
+ refcounts_before = self.alias_refcount.copy()
+ if isinstance(child, Node):
+ child_clause = self._add_q(
+ child, used_aliases, branch_negated,
+ current_negated)
+ else:
+ child_clause = self.build_filter(
+ child, can_reuse=used_aliases, branch_negated=branch_negated,
+ current_negated=current_negated)
+ target_clause.add(child_clause, connector)
+ if effective_connector == OR:
+ used = alias_diff(refcounts_before, self.alias_refcount)
+ for alias in used:
+ alias_usage_counts[alias] = alias_usage_counts.get(alias, 0) + 1
+ if effective_connector == OR:
+ self.promote_disjunction(aliases_before, alias_usage_counts,
+ len(q_object.children))
+ return target_clause
+
+ def names_to_path(self, names, opts, allow_many, allow_explicit_fk):
+ """
+ Walks the names path and turns them into PathInfo tuples. Note that a
+ single name in 'names' can generate multiple PathInfos (m2m for
+ example).
+
+ 'names' is the path of names to travel, 'opts' is the model Options we
+ start the name resolving from, 'allow_many' and 'allow_explicit_fk'
+ are as for setup_joins().
+
+ Returns a list of PathInfo tuples. In addition, returns the final
+ field (the last used join field) and 'targets' (fields guaranteed to
+ contain the same value as the final field).
+ """
+ path, names_with_path = [], []
+ for pos, name in enumerate(names):
+ cur_names_with_path = (name, [])
+ if name == 'pk':
+ name = opts.pk.name
+ try:
+ field, model, direct, m2m = opts.get_field_by_name(name)
+ except FieldDoesNotExist:
+ for f in opts.fields:
+ if allow_explicit_fk and name == f.attname:
+ # XXX: A hack to allow foo_id to work in values() for
+ # backwards compatibility purposes. If we dropped that
+ # feature, this could be removed.
+ field, model, direct, m2m = opts.get_field_by_name(f.name)
+ break
+ else:
+ available = opts.get_all_field_names() + list(self.aggregate_select)
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (name, ", ".join(available)))
+ # Check if we need any joins for concrete inheritance cases (the
+ # field lives in parent, but we are currently in one of its
+ # children)
+ if model:
+ # The field lives on a base class of the current model.
+ # Skip the chain of proxy to the concrete proxied model
+ proxied_model = opts.concrete_model
+
+ for int_model in opts.get_base_chain(model):
+ if int_model is proxied_model:
+ opts = int_model._meta
+ else:
+ final_field = opts.parents[int_model]
+ targets = (final_field.rel.get_related_field(),)
+ opts = int_model._meta
+ path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
+ cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True))
+ if hasattr(field, 'get_path_info'):
+ pathinfos = field.get_path_info()
+ if not allow_many:
+ for inner_pos, p in enumerate(pathinfos):
+ if p.m2m:
+ cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1])
+ names_with_path.append(cur_names_with_path)
+ raise MultiJoin(pos + 1, names_with_path)
+ last = pathinfos[-1]
+ path.extend(pathinfos)
+ final_field = last.join_field
+ opts = last.to_opts
+ targets = last.target_fields
+ cur_names_with_path[1].extend(pathinfos)
+ names_with_path.append(cur_names_with_path)
+ else:
+ # Local non-relational field.
+ final_field = field
+ targets = (field,)
+ break
+
+ if pos != len(names) - 1:
+ if pos == len(names) - 2:
+ raise FieldError(
+ "Join on field %r not permitted. Did you misspell %r for "
+ "the lookup type?" % (name, names[pos + 1]))
+ else:
+ raise FieldError("Join on field %r not permitted." % name)
+ return path, final_field, targets
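+
+ # Illustrative example (models hypothetical): for names
+ # ['book', 'title'] starting from an Author options object, 'book' is
+ # a reverse relation yielding an m2m-style PathInfo (so MultiJoin is
+ # raised if allow_many is False), and 'title' ends the walk as a
+ # local non-relational field, becoming both final_field and the
+ # single entry in targets.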
+
+ def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True,
+ allow_explicit_fk=False, outer_if_first=False):
+ """
+ Compute the necessary table joins for the passage through the fields
+ given in 'names'. 'opts' is the Options class for the current model
+ (which gives the table we are starting from), 'alias' is the alias for
+ the table to start the joining from.
+
+ The 'can_reuse' defines the reverse foreign key joins we can reuse. It
+ can be None in which case all joins are reusable or a set of aliases
+ that can be reused. Note that non-reverse foreign keys are always
+ reusable when using setup_joins().
+
+ If 'allow_many' is False, then any reverse foreign key seen will
+ generate a MultiJoin exception.
+
+ The 'allow_explicit_fk' controls if field.attname is allowed in the
+ lookups.
+
+ Returns the final field involved in the joins, the target field (used
+ for any 'where' constraint), the final 'opts' value, the joins and the
+ field path travelled to generate the joins.
+
+ The target field is the field containing the concrete value. The
+ final field can be something different, for example a foreign key
+ pointing to that value. The final field is needed, for example, in
+ some value conversions (converting 'obj' in fk__id=obj to a pk value
+ using the foreign key field).
+ """
+ joins = [alias]
+ # First, generate the path for the names
+ path, final_field, targets = self.names_to_path(
+ names, opts, allow_many, allow_explicit_fk)
+ # Then, add the path to the query's joins. Note that we can't trim
+ # joins at this stage - we will need the information about join type
+ # of the trimmed joins.
+ for pos, join in enumerate(path):
+ opts = join.to_opts
+ if join.direct:
+ nullable = self.is_nullable(join.join_field)
+ else:
+ nullable = True
+ connection = alias, opts.db_table, join.join_field.get_joining_columns()
+ reuse = can_reuse if join.m2m else None
+ alias = self.join(
+ connection, reuse=reuse, nullable=nullable, join_field=join.join_field,
+ outer_if_first=outer_if_first)
+ joins.append(alias)
+ if hasattr(final_field, 'field'):
+ final_field = final_field.field
+ return final_field, targets, opts, joins, path
+
+ def trim_joins(self, targets, joins, path):
+ """
+ The 'targets' parameter is the tuple of final fields being joined to,
+ 'joins' is the full list of join aliases. The 'path' contains the
+ PathInfos
+ used to create the joins.
+
+ Returns the final target fields, the final table alias and the new
+ active joins.
+
+ We will always trim any direct join if we have the target column
+ available already in the previous table. Reverse joins can't be
+ trimmed as we don't know if there is anything on the other side of
+ the join.
+ """
+ for pos, info in enumerate(reversed(path)):
+ if len(joins) == 1 or not info.direct:
+ break
+ join_targets = set(t.column for t in info.join_field.foreign_related_fields)
+ cur_targets = set(t.column for t in targets)
+ if not cur_targets.issubset(join_targets):
+ break
+ targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
+ self.unref_alias(joins.pop())
+ return targets, joins[-1], joins
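+
+ # Illustrative example (models hypothetical): for a lookup
+ # 'author__id' the join chain is [book, author], but the book table
+ # already holds author_id, so the author join is trimmed and the
+ # target becomes the foreign key column on book.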
+
+ def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
+ """
+ When doing an exclude against any kind of N-to-many relation, we need
+ to use a subquery. This method constructs the nested query, given the
+ original exclude filter (filter_expr) and the portion up to the first
+ N-to-many relation field.
+
+ As an example, we could have an original filter ~Q(child__name='foo').
+ We would get here with filter_expr = ('child__name', 'foo'),
+ prefix = 'child' and can_reuse being a set of joins usable for
+ filters in the original query.
+
+ We will turn this into the equivalent of:
+ WHERE NOT (pk IN (SELECT parent_id FROM thetable
+ WHERE name = 'foo' AND parent_id IS NOT NULL))
+
+ It might be worth it to consider using WHERE NOT EXISTS as that has
+ saner null handling, and is easier for the backend's optimizer to
+ handle.
+ """
+ # Generate the inner query.
+ query = Query(self.model)
+ query.where.add(query.build_filter(filter_expr), AND)
+ query.bump_prefix()
+ query.clear_ordering(True)
+ # Try to keep the subquery as simple as possible: trim leading joins
+ # from it.
+ trimmed_prefix, contains_louter = query.trim_start(names_with_path)
+ query.remove_inherited_models()
+
+ # Add extra check to make sure the selected field will not be null
+ # since we are adding an IN <subquery> clause. This prevents the
+ # database from tripping over IN (...,NULL,...) selects and returning
+ # nothing
+ if self.is_nullable(query.select[0].field):
+ alias, col = query.select[0].col
+ query.where.add((Constraint(alias, col, query.select[0].field), 'isnull', False), AND)
+
+ condition = self.build_filter(
+ ('%s__in' % trimmed_prefix, query),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ if contains_louter:
+ or_null_condition = self.build_filter(
+ ('%s__isnull' % trimmed_prefix, True),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ condition.add(or_null_condition, OR)
+ # Note that the end result will be:
+ # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
+ # This might look crazy but due to how IN works, this seems to be
+ # correct. If the IS NOT NULL check is removed then outercol NOT
+ # IN will return UNKNOWN. If the IS NULL check is removed, then if
+ # outercol IS NULL we will not match the row.
+ return condition
+
+ def set_empty(self):
+ self.where = EmptyWhere()
+ self.having = EmptyWhere()
+
+ def is_empty(self):
+ return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere)
+
+ def set_limits(self, low=None, high=None):
+ """
+ Adjusts the limits on the rows retrieved. We use low/high to set these,
+ as it makes it more Pythonic to read and write. When the SQL query is
+ created, they are converted to the appropriate offset and limit values.
+
+ Any limits passed in here are applied relative to the existing
+ constraints. So low is added to the current low value and both will be
+ clamped to any existing high value.
+ """
+ if high is not None:
+ if self.high_mark is not None:
+ self.high_mark = min(self.high_mark, self.low_mark + high)
+ else:
+ self.high_mark = self.low_mark + high
+ if low is not None:
+ if self.high_mark is not None:
+ self.low_mark = min(self.high_mark, self.low_mark + low)
+ else:
+ self.low_mark = self.low_mark + low
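+
+ # Illustrative arithmetic: slicing twice composes here. qs[5:10] sets
+ # (low_mark, high_mark) = (5, 10); a further [1:3] calls
+ # set_limits(1, 3), giving high_mark = min(10, 5 + 3) = 8 and then
+ # low_mark = min(8, 5 + 1) = 6, i.e. rows 6 and 7 of the original.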
+
+ def clear_limits(self):
+ """
+ Clears any existing limits.
+ """
+ self.low_mark, self.high_mark = 0, None
+
+ def can_filter(self):
+ """
+ Returns True if adding filters to this instance is still possible.
+
+ Typically, this means no limits or offsets have been put on the results.
+ """
+ return not self.low_mark and self.high_mark is None
+
+ def clear_select_clause(self):
+ """
+ Removes all fields from SELECT clause.
+ """
+ self.select = []
+ self.default_cols = False
+ self.select_related = False
+ self.set_extra_mask(())
+ self.set_aggregate_mask(())
+
+ def clear_select_fields(self):
+ """
+ Clears the list of fields to select (but not extra_select columns).
+ Some queryset types completely replace any existing list of select
+ columns.
+ """
+ self.select = []
+
+ def add_distinct_fields(self, *field_names):
+ """
+ Adds and resolves the given fields to the query's "distinct on" clause.
+ """
+ self.distinct_fields = field_names
+ self.distinct = True
+
+ def add_fields(self, field_names, allow_m2m=True):
+ """
+ Adds the given (model) fields to the select set. The field names are
+ added in the order specified.
+ """
+ alias = self.get_initial_alias()
+ opts = self.get_meta()
+
+ try:
+ for name in field_names:
+ field, targets, u2, joins, path = self.setup_joins(
+ name.split(LOOKUP_SEP), opts, alias, None, allow_m2m,
+ allow_explicit_fk=True, outer_if_first=True)
+
+ # Trim last join if possible
+ targets, final_alias, remaining_joins = self.trim_joins(targets, joins[-2:], path)
+ joins = joins[:-2] + remaining_joins
+
+ self.promote_joins(joins[1:])
+ for target in targets:
+ self.select.append(SelectInfo((final_alias, target.column), target))
+ except MultiJoin:
+ raise FieldError("Invalid field name: '%s'" % name)
+ except FieldError:
+ if LOOKUP_SEP in name:
+ # For lookups spanning over relationships, show the error
+ # from the model on which the lookup failed.
+ raise
+ else:
+ names = sorted(opts.get_all_field_names() + list(self.extra)
+ + list(self.aggregate_select))
+ raise FieldError("Cannot resolve keyword %r into field. "
+ "Choices are: %s" % (name, ", ".join(names)))
+ self.remove_inherited_models()
+
+ def add_ordering(self, *ordering):
+ """
+ Adds items from the 'ordering' sequence to the query's "order by"
+ clause. These items are either field names (not column names) --
+ possibly with a direction prefix ('-' or '?') -- or ordinals,
+ corresponding to column positions in the 'select' list.
+
+ If 'ordering' is empty, all ordering is cleared from the query.
+ """
+ errors = []
+ for item in ordering:
+ if not ORDER_PATTERN.match(item):
+ errors.append(item)
+ if errors:
+ raise FieldError('Invalid order_by arguments: %s' % errors)
+ if ordering:
+ self.order_by.extend(ordering)
+ else:
+ self.default_ordering = False
+
+ def clear_ordering(self, force_empty):
+ """
+ Removes any ordering settings. If 'force_empty' is True, there will be
+ no ordering in the resulting query (not even the model's default).
+ """
+ self.order_by = []
+ self.extra_order_by = ()
+ if force_empty:
+ self.default_ordering = False
+
+ def set_group_by(self):
+ """
+ Expands the GROUP BY clause required by the query.
+
+ This will usually be the set of all non-aggregate fields in the
+ return data. If the database backend supports grouping by the
+ primary key, and the query would be equivalent, the optimization
+ will be made automatically.
+ """
+ self.group_by = []
+
+ for col, _ in self.select:
+ self.group_by.append(col)
+
+ def add_count_column(self):
+ """
+ Converts the query to do count(...) or count(distinct(pk)) in order to
+ get its size.
+ """
+ if not self.distinct:
+ if not self.select:
+ count = self.aggregates_module.Count('*', is_summary=True)
+ else:
+ assert len(self.select) == 1, \
+ "Cannot add count col with multiple cols in 'select': %r" % self.select
+ count = self.aggregates_module.Count(self.select[0].col)
+ else:
+ opts = self.get_meta()
+ if not self.select:
+ count = self.aggregates_module.Count(
+ (self.join((None, opts.db_table, None)), opts.pk.column),
+ is_summary=True, distinct=True)
+ else:
+ # Because of SQL portability issues, multi-column, distinct
+ # counts need a sub-query -- see get_count() for details.
+ assert len(self.select) == 1, \
+ "Cannot add count col with multiple cols in 'select'."
+
+ count = self.aggregates_module.Count(self.select[0].col, distinct=True)
+ # Distinct handling is done in Count(), so don't do it at this
+ # level.
+ self.distinct = False
+
+ # Set only aggregate to be the count column.
+ # Clear out the select cache to reflect the new unmasked aggregates.
+ self.aggregates = {None: count}
+ self.set_aggregate_mask(None)
+ self.group_by = None
+
+ def add_select_related(self, fields):
+ """
+ Sets up the select_related data structure so that we only select
+ certain related models (as opposed to all models, when
+ self.select_related=True).
+ """
+ field_dict = {}
+ for field in fields:
+ d = field_dict
+ for part in field.split(LOOKUP_SEP):
+ d = d.setdefault(part, {})
+ self.select_related = field_dict
+ self.related_select_cols = []
+
+ def add_extra(self, select, select_params, where, params, tables, order_by):
+ """
+ Adds data to the various extra_* attributes for user-created additions
+ to the query.
+ """
+ if select:
+ # We need to pair any placeholder markers in the 'select'
+ # dictionary with their parameters in 'select_params' so that
+ # subsequent updates to the select dictionary also adjust the
+ # parameters appropriately.
+ select_pairs = SortedDict()
+ if select_params:
+ param_iter = iter(select_params)
+ else:
+ param_iter = iter([])
+ for name, entry in select.items():
+ entry = force_text(entry)
+ entry_params = []
+ pos = entry.find("%s")
+ while pos != -1:
+ entry_params.append(next(param_iter))
+ pos = entry.find("%s", pos + 2)
+ select_pairs[name] = (entry, entry_params)
+            # This is order preserving, since self.extra is a SortedDict.
+ self.extra.update(select_pairs)
+ if where or params:
+ self.where.add(ExtraWhere(where, params), AND)
+ if tables:
+ self.extra_tables += tuple(tables)
+ if order_by:
+ self.extra_order_by = order_by
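+
+    # Illustration (hypothetical names): add_extra(select={'double_id': 'id * %s'},
+    # select_params=[2], ...) pairs the placeholder with its parameter, storing
+    #     self.extra['double_id'] == ('id * %s', [2])
+    # so later edits to the select dictionary keep the parameters aligned.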
+
+ def clear_deferred_loading(self):
+ """
+ Remove any fields from the deferred loading set.
+ """
+ self.deferred_loading = (set(), True)
+
+ def add_deferred_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ exclude from loading from the database when automatic column selection
+ is done. The new field names are added to any existing field names that
+ are deferred (or removed from any existing field names that are marked
+ as the only ones for immediate loading).
+ """
+ # Fields on related models are stored in the literal double-underscore
+ # format, so that we can use a set datastructure. We do the foo__bar
+        # splitting and handling when computing the SQL column names (as part of
+ # get_columns()).
+ existing, defer = self.deferred_loading
+ if defer:
+ # Add to existing deferred names.
+ self.deferred_loading = existing.union(field_names), True
+ else:
+ # Remove names from the set of any existing "immediate load" names.
+ self.deferred_loading = existing.difference(field_names), False
+
+ def add_immediate_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ retrieve when the SQL is executed ("immediate loading" fields). The
+ field names replace any existing immediate loading field names. If
+ there are field names already specified for deferred loading, those
+ names are removed from the new field_names before storing the new names
+ for immediate loading. (That is, immediate loading overrides any
+ existing immediate values, but respects existing deferrals.)
+ """
+ existing, defer = self.deferred_loading
+ field_names = set(field_names)
+ if 'pk' in field_names:
+ field_names.remove('pk')
+ field_names.add(self.get_meta().pk.name)
+
+ if defer:
+ # Remove any existing deferred names from the current set before
+ # setting the new names.
+ self.deferred_loading = field_names.difference(existing), False
+ else:
+ # Replace any existing "immediate load" field names.
+ self.deferred_loading = field_names, False
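+
+    # Illustration, assuming a model whose primary key is named 'id': with
+    # self.deferred_loading == ({'name'}, True), calling
+    # add_immediate_loading(['pk', 'name']) translates 'pk' to 'id', drops the
+    # already-deferred 'name', and leaves ({'id'}, False).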
+
+ def get_loaded_field_names(self):
+ """
+        If any fields are marked to be deferred, returns a dictionary mapping
+        models to the set of field names that will be loaded for each model.
+        If a model is not in the returned dictionary, none of its fields are
+        deferred.
+
+ If no fields are marked for deferral, returns an empty dictionary.
+ """
+ # We cache this because we call this function multiple times
+ # (compiler.fill_related_selections, query.iterator)
+ try:
+ return self._loaded_field_names_cache
+ except AttributeError:
+ collection = {}
+ self.deferred_to_data(collection, self.get_loaded_field_names_cb)
+ self._loaded_field_names_cache = collection
+ return collection
+
+ def get_loaded_field_names_cb(self, target, model, fields):
+ """
+        Callback used by get_loaded_field_names().
+ """
+ target[model] = set([f.name for f in fields])
+
+ def set_aggregate_mask(self, names):
+ "Set the mask of aggregates that will actually be returned by the SELECT"
+ if names is None:
+ self.aggregate_select_mask = None
+ else:
+ self.aggregate_select_mask = set(names)
+ self._aggregate_select_cache = None
+
+ def set_extra_mask(self, names):
+ """
+        Set the mask of extra select items that will be returned by SELECT.
+        We don't actually remove them from the Query since they might be
+        used later.
+ """
+ if names is None:
+ self.extra_select_mask = None
+ else:
+ self.extra_select_mask = set(names)
+ self._extra_select_cache = None
+
+ def _aggregate_select(self):
+ """The SortedDict of aggregate columns that are not masked, and should
+ be used in the SELECT clause.
+
+ This result is cached for optimization purposes.
+ """
+ if self._aggregate_select_cache is not None:
+ return self._aggregate_select_cache
+ elif self.aggregate_select_mask is not None:
+ self._aggregate_select_cache = SortedDict([
+                (k, v) for k, v in self.aggregates.items()
+ if k in self.aggregate_select_mask
+ ])
+ return self._aggregate_select_cache
+ else:
+ return self.aggregates
+ aggregate_select = property(_aggregate_select)
+
+ def _extra_select(self):
+ if self._extra_select_cache is not None:
+ return self._extra_select_cache
+ elif self.extra_select_mask is not None:
+ self._extra_select_cache = SortedDict([
+                (k, v) for k, v in self.extra.items()
+ if k in self.extra_select_mask
+ ])
+ return self._extra_select_cache
+ else:
+ return self.extra
+ extra_select = property(_extra_select)
+
+ def trim_start(self, names_with_path):
+ """
+ Trims joins from the start of the join path. The candidates for trim
+ are the PathInfos in names_with_path structure that are m2m joins.
+
+ Also sets the select column so the start matches the join.
+
+ This method is meant to be used for generating the subquery joins &
+ cols in split_exclude().
+
+        Returns a lookup usable for doing outerq.filter(lookup=self) and a
+        boolean indicating if the joins in the prefix contain a LEFT OUTER
+        join.
+        """
+ all_paths = []
+ for _, paths in names_with_path:
+ all_paths.extend(paths)
+ contains_louter = False
+ for pos, path in enumerate(all_paths):
+ if path.m2m:
+ break
+ if self.alias_map[self.tables[pos + 1]].join_type == self.LOUTER:
+ contains_louter = True
+ self.unref_alias(self.tables[pos])
+        # The path.join_field is a Rel; let's get the other side's field.
+ join_field = path.join_field.field
+ # Build the filter prefix.
+ trimmed_prefix = []
+ paths_in_prefix = pos
+ for name, path in names_with_path:
+ if paths_in_prefix - len(path) < 0:
+ break
+ trimmed_prefix.append(name)
+ paths_in_prefix -= len(path)
+ trimmed_prefix.append(
+ join_field.foreign_related_fields[0].name)
+ trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
+        # Let's still see if we can trim the first join from the inner query
+ # (that is, self). We can't do this for LEFT JOINs because we would
+ # miss those rows that have nothing on the outer side.
+ if self.alias_map[self.tables[pos + 1]].join_type != self.LOUTER:
+ select_fields = [r[0] for r in join_field.related_fields]
+ select_alias = self.tables[pos + 1]
+ self.unref_alias(self.tables[pos])
+ extra_restriction = join_field.get_extra_restriction(
+ self.where_class, None, self.tables[pos + 1])
+ if extra_restriction:
+ self.where.add(extra_restriction, AND)
+ else:
+ # TODO: It might be possible to trim more joins from the start of the
+ # inner query if it happens to have a longer join chain containing the
+            # values in select_fields. Let's punt on this one for now.
+ select_fields = [r[1] for r in join_field.related_fields]
+ select_alias = self.tables[pos]
+ self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
+ return trimmed_prefix, contains_louter
+
+ def is_nullable(self, field):
+ """
+ A helper to check if the given field should be treated as nullable.
+
+ Some backends treat '' as null and Django treats such fields as
+ nullable for those backends. In such situations field.null can be
+ False even if we should treat the field as nullable.
+ """
+ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
+ # (nor should it have) knowledge of which connection is going to be
+ # used. The proper fix would be to defer all decisions where
+ # is_nullable() is needed to the compiler stage, but that is not easy
+ # to do currently.
+        if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
+                and field.empty_strings_allowed):
+ return True
+ else:
+ return field.null
+
+def get_order_dir(field, default='ASC'):
+ """
+ Returns the field name and direction for an order specification. For
+ example, '-foo' is returned as ('foo', 'DESC').
+
+ The 'default' param is used to indicate which way no prefix (or a '+'
+ prefix) should sort. The '-' prefix always sorts the opposite way.
+ """
+ dirn = ORDER_DIR[default]
+ if field[0] == '-':
+ return field[1:], dirn[1]
+ return field, dirn[0]
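+
+# A couple of worked examples (field name hypothetical):
+#
+#   >>> get_order_dir('-created')
+#   ('created', 'DESC')
+#   >>> get_order_dir('created')
+#   ('created', 'ASC')
+#   >>> get_order_dir('created', default='DESC')
+#   ('created', 'DESC')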
+
+
+def add_to_dict(data, key, value):
+ """
+ A helper function to add "value" to the set of values for "key", whether or
+ not "key" already exists.
+ """
+ if key in data:
+ data[key].add(value)
+ else:
+ data[key] = set([value])
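+
+# For example:
+#
+#   >>> data = {}
+#   >>> add_to_dict(data, 'k', 1)
+#   >>> add_to_dict(data, 'k', 2)
+#   >>> data == {'k': set([1, 2])}
+#   True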
+
+def is_reverse_o2o(field):
+ """
+    A little helper to check if the given field is a reverse one-to-one
+    relation. The field is expected to be some sort of relation field or
+    related object.
+ """
+ return not hasattr(field, 'rel') and field.field.unique
+
+def alias_diff(refcounts_before, refcounts_after):
+ """
+    Given the before and after copies of refcounts, works out which aliases
+ have been added to the after copy.
+ """
+ # Use -1 as default value so that any join that is created, then trimmed
+ # is seen as added.
+ return set(t for t in refcounts_after
+ if refcounts_after[t] > refcounts_before.get(t, -1))
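+
+# For example, an alias created and then trimmed between the two snapshots
+# has a refcount of 0, which still beats the -1 default, so it is reported
+# as added (alias names hypothetical):
+#
+#   >>> sorted(alias_diff({'T1': 1}, {'T1': 1, 'T2': 1, 'T3': 0}))
+#   ['T2', 'T3']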
diff --git a/lib/python2.7/site-packages/django/db/models/sql/subqueries.py b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
new file mode 100644
index 0000000..6dc0005
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
@@ -0,0 +1,297 @@
+"""
+Query subclasses which provide extra functionality beyond simple data retrieval.
+"""
+
+from django.conf import settings
+from django.core.exceptions import FieldError
+from django.db import connections
+from django.db.models.constants import LOOKUP_SEP
+from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist
+from django.db.models.sql.constants import *
+from django.db.models.sql.datastructures import Date, DateTime
+from django.db.models.sql.query import Query
+from django.db.models.sql.where import AND, Constraint
+from django.utils.functional import Promise
+from django.utils.encoding import force_text
+from django.utils import six
+from django.utils import timezone
+
+
+__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
+ 'DateTimeQuery', 'AggregateQuery']
+
+class DeleteQuery(Query):
+ """
+ Delete queries are done through this class, since they are more constrained
+ than general queries.
+ """
+
+ compiler = 'SQLDeleteCompiler'
+
+ def do_query(self, table, where, using):
+ self.tables = [table]
+ self.where = where
+ self.get_compiler(using).execute_sql(None)
+
+ def delete_batch(self, pk_list, using, field=None):
+ """
+ Set up and execute delete queries for all the objects in pk_list.
+
+ More than one physical query may be executed if there are a
+ lot of values in pk_list.
+ """
+ if not field:
+ field = self.get_meta().pk
+ for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
+ where = self.where_class()
+ where.add((Constraint(None, field.column, field), 'in',
+ pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND)
+ self.do_query(self.get_meta().db_table, where, using=using)
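+
+    # Illustration, assuming the default GET_ITERATOR_CHUNK_SIZE of 100:
+    # delete_batch(range(250), using) issues three DELETE statements whose IN
+    # clauses hold 100, 100 and 50 primary keys respectively.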
+
+ def delete_qs(self, query, using):
+ """
+ Delete the queryset in one SQL query (if possible). For simple queries
+ this is done by copying the query.query.where to self.query, for
+ complex queries by using subquery.
+ """
+ innerq = query.query
+ # Make sure the inner query has at least one table in use.
+ innerq.get_initial_alias()
+ # The same for our new query.
+ self.get_initial_alias()
+ innerq_used_tables = [t for t in innerq.tables
+ if innerq.alias_refcount[t]]
+ if ((not innerq_used_tables or innerq_used_tables == self.tables)
+ and not len(innerq.having)):
+            # There is only the base table in use in the query, and there is
+            # no aggregate filtering going on.
+ self.where = innerq.where
+ else:
+ pk = query.model._meta.pk
+ if not connections[using].features.update_can_self_select:
+ # We can't do the delete using subquery.
+ values = list(query.values_list('pk', flat=True))
+ if not values:
+ return
+ self.delete_batch(values, using)
+ return
+ else:
+ innerq.clear_select_clause()
+ innerq.select = [SelectInfo((self.get_initial_alias(), pk.column), None)]
+ values = innerq
+ where = self.where_class()
+ where.add((Constraint(None, pk.column, pk), 'in', values), AND)
+ self.where = where
+ self.get_compiler(using).execute_sql(None)
+
+
+class UpdateQuery(Query):
+ """
+ Represents an "update" SQL query.
+ """
+
+ compiler = 'SQLUpdateCompiler'
+
+ def __init__(self, *args, **kwargs):
+ super(UpdateQuery, self).__init__(*args, **kwargs)
+ self._setup_query()
+
+ def _setup_query(self):
+ """
+ Runs on initialization and after cloning. Any attributes that would
+ normally be set in __init__ should go in here, instead, so that they
+ are also set up after a clone() call.
+ """
+ self.values = []
+ self.related_ids = None
+ if not hasattr(self, 'related_updates'):
+ self.related_updates = {}
+
+ def clone(self, klass=None, **kwargs):
+ return super(UpdateQuery, self).clone(klass,
+ related_updates=self.related_updates.copy(), **kwargs)
+
+ def update_batch(self, pk_list, values, using):
+ pk_field = self.get_meta().pk
+ self.add_update_values(values)
+ for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
+ self.where = self.where_class()
+ self.where.add((Constraint(None, pk_field.column, pk_field), 'in',
+ pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]),
+ AND)
+ self.get_compiler(using).execute_sql(None)
+
+ def add_update_values(self, values):
+ """
+ Convert a dictionary of field name to value mappings into an update
+ query. This is the entry point for the public update() method on
+ querysets.
+ """
+ values_seq = []
+ for name, val in six.iteritems(values):
+ field, model, direct, m2m = self.get_meta().get_field_by_name(name)
+ if not direct or m2m:
+ raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
+ if model:
+ self.add_related_update(model, field, val)
+ continue
+ values_seq.append((field, model, val))
+ return self.add_update_fields(values_seq)
+
+ def add_update_fields(self, values_seq):
+ """
+ Turn a sequence of (field, model, value) triples into an update query.
+ Used by add_update_values() as well as the "fast" update path when
+ saving models.
+ """
+        # Check that no Promise object is passed to the query. Refs #10498.
+ values_seq = [(value[0], value[1], force_text(value[2]))
+ if isinstance(value[2], Promise) else value
+ for value in values_seq]
+ self.values.extend(values_seq)
+
+ def add_related_update(self, model, field, value):
+ """
+ Adds (name, value) to an update query for an ancestor model.
+
+ Updates are coalesced so that we only run one update query per ancestor.
+ """
+ try:
+ self.related_updates[model].append((field, None, value))
+ except KeyError:
+ self.related_updates[model] = [(field, None, value)]
+
+ def get_related_updates(self):
+ """
+ Returns a list of query objects: one for each update required to an
+ ancestor model. Each query will have the same filtering conditions as
+ the current query but will only update a single table.
+ """
+ if not self.related_updates:
+ return []
+ result = []
+ for model, values in six.iteritems(self.related_updates):
+ query = UpdateQuery(model)
+ query.values = values
+ if self.related_ids is not None:
+ query.add_filter(('pk__in', self.related_ids))
+ result.append(query)
+ return result
+
+class InsertQuery(Query):
+ compiler = 'SQLInsertCompiler'
+
+ def __init__(self, *args, **kwargs):
+ super(InsertQuery, self).__init__(*args, **kwargs)
+ self.fields = []
+ self.objs = []
+
+ def clone(self, klass=None, **kwargs):
+ extras = {
+ 'fields': self.fields[:],
+ 'objs': self.objs[:],
+ 'raw': self.raw,
+ }
+ extras.update(kwargs)
+ return super(InsertQuery, self).clone(klass, **extras)
+
+ def insert_values(self, fields, objs, raw=False):
+ """
+        Set up the insert query from the 'fields' and 'objs' arguments: the
+        model fields being inserted and the objects supplying their values.
+
+        If 'raw' is True, the values are inserted directly into the query,
+        rather than passed as SQL parameters. This provides a way to insert
+        NULL and DEFAULT keywords into the query, for example.
+ """
+ self.fields = fields
+ # Check that no Promise object reaches the DB. Refs #10498.
+ for field in fields:
+ for obj in objs:
+ value = getattr(obj, field.attname)
+ if isinstance(value, Promise):
+ setattr(obj, field.attname, force_text(value))
+ self.objs = objs
+ self.raw = raw
+
+class DateQuery(Query):
+ """
+ A DateQuery is a normal query, except that it specifically selects a single
+ date field. This requires some special handling when converting the results
+ back to Python objects, so we put it in a separate class.
+ """
+
+ compiler = 'SQLDateCompiler'
+
+ def add_select(self, field_name, lookup_type, order='ASC'):
+ """
+ Converts the query into an extraction query.
+ """
+ try:
+ result = self.setup_joins(
+ field_name.split(LOOKUP_SEP),
+ self.get_meta(),
+ self.get_initial_alias(),
+ )
+ except FieldError:
+ raise FieldDoesNotExist("%s has no field named '%s'" % (
+ self.get_meta().object_name, field_name
+ ))
+ field = result[0]
+ self._check_field(field) # overridden in DateTimeQuery
+ alias = result[3][-1]
+ select = self._get_select((alias, field.column), lookup_type)
+ self.clear_select_clause()
+ self.select = [SelectInfo(select, None)]
+ self.distinct = True
+ self.order_by = [1] if order == 'ASC' else [-1]
+
+ if field.null:
+ self.add_filter(("%s__isnull" % field_name, False))
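+
+        # Illustration (hypothetical field): add_select('pub_date', 'year')
+        # leaves the query selecting DISTINCT year-truncated values of
+        # pub_date in ascending order, with NULLs filtered out when the
+        # field is nullable.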
+
+ def _check_field(self, field):
+ assert isinstance(field, DateField), \
+ "%r isn't a DateField." % field.name
+ if settings.USE_TZ:
+ assert not isinstance(field, DateTimeField), \
+ "%r is a DateTimeField, not a DateField." % field.name
+
+ def _get_select(self, col, lookup_type):
+ return Date(col, lookup_type)
+
+class DateTimeQuery(DateQuery):
+ """
+ A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
+ support is active, the tzinfo attribute contains the time zone to use for
+ converting the values before truncating them. Otherwise it's set to None.
+ """
+
+ compiler = 'SQLDateTimeCompiler'
+
+ def clone(self, klass=None, memo=None, **kwargs):
+ if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
+ kwargs['tzinfo'] = self.tzinfo
+ return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
+
+ def _check_field(self, field):
+ assert isinstance(field, DateTimeField), \
+ "%r isn't a DateTimeField." % field.name
+
+ def _get_select(self, col, lookup_type):
+ if self.tzinfo is None:
+ tzname = None
+ else:
+ tzname = timezone._get_timezone_name(self.tzinfo)
+ return DateTime(col, lookup_type, tzname)
+
+class AggregateQuery(Query):
+ """
+ An AggregateQuery takes another query as a parameter to the FROM
+ clause and only selects the elements in the provided list.
+ """
+
+ compiler = 'SQLAggregateCompiler'
+
+ def add_subquery(self, query, using):
+ self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
diff --git a/lib/python2.7/site-packages/django/db/models/sql/where.py b/lib/python2.7/site-packages/django/db/models/sql/where.py
new file mode 100644
index 0000000..2a342d4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/where.py
@@ -0,0 +1,419 @@
+"""
+Code to manage the creation and SQL rendering of 'where' constraints.
+"""
+
+from __future__ import absolute_import
+
+import datetime
+from itertools import repeat
+
+from django.conf import settings
+from django.db.models.fields import DateTimeField, Field
+from django.db.models.sql.datastructures import EmptyResultSet, Empty
+from django.db.models.sql.aggregates import Aggregate
+from django.utils.itercompat import is_iterator
+from django.utils.six.moves import xrange
+from django.utils import timezone
+from django.utils import tree
+
+# Connection types
+AND = 'AND'
+OR = 'OR'
+
+class EmptyShortCircuit(Exception):
+ """
+ Internal exception used to indicate that a "matches nothing" node should be
+ added to the where-clause.
+ """
+ pass
+
+class WhereNode(tree.Node):
+ """
+ Used to represent the SQL where-clause.
+
+ The class is tied to the Query class that created it (in order to create
+ the correct SQL).
+
+ A child is usually a tuple of:
+ (Constraint(alias, targetcol, field), lookup_type, value)
+    where value can be either a raw Python value, or a Query, an
+    ExpressionNode, or something else that knows how to turn itself into SQL.
+
+    However, a child could also be any class with an as_sql() method and
+    either a relabeled_clone() method or relabel_aliases() and clone()
+    methods. The
+ second alternative should be used if the alias is not the only mutable
+ variable.
+ """
+ default = AND
+
+ def _prepare_data(self, data):
+ """
+ Prepare data for addition to the tree. If the data is a list or tuple,
+ it is expected to be of the form (obj, lookup_type, value), where obj
+ is a Constraint object, and is then slightly munged before being
+ stored (to avoid storing any reference to field objects). Otherwise,
+ the 'data' is stored unchanged and can be any class with an 'as_sql()'
+ method.
+ """
+ if not isinstance(data, (list, tuple)):
+ return data
+ obj, lookup_type, value = data
+ if is_iterator(value):
+ # Consume any generators immediately, so that we can determine
+ # emptiness and transform any non-empty values correctly.
+ value = list(value)
+
+        # The "value_annotation" parameter is used to pass auxiliary information
+ # about the value(s) to the query construction. Specifically, datetime
+ # and empty values need special handling. Other types could be used
+ # here in the future (using Python types is suggested for consistency).
+ if (isinstance(value, datetime.datetime)
+ or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')):
+ value_annotation = datetime.datetime
+ elif hasattr(value, 'value_annotation'):
+ value_annotation = value.value_annotation
+ else:
+ value_annotation = bool(value)
+
+ if hasattr(obj, "prepare"):
+ value = obj.prepare(lookup_type, value)
+ return (obj, lookup_type, value_annotation, value)
+
+ def as_sql(self, qn, connection):
+ """
+ Returns the SQL version of the where clause and the value to be
+ substituted in. Returns '', [] if this node matches everything,
+ None, [] if this node is empty, and raises EmptyResultSet if this
+ node can't match anything.
+ """
+        # Note that the logic here is made slightly more complex than
+        # necessary because there are two kinds of empty nodes: nodes
+        # containing 0 children, and nodes that are known to match everything.
+        # A match-everything node is different from an empty node (which also
+        # technically matches everything) for backwards compatibility reasons.
+        # Refs #5261.
+ result = []
+ result_params = []
+ everything_childs, nothing_childs = 0, 0
+ non_empty_childs = len(self.children)
+
+ for child in self.children:
+ try:
+ if hasattr(child, 'as_sql'):
+ sql, params = child.as_sql(qn=qn, connection=connection)
+ else:
+ # A leaf node in the tree.
+ sql, params = self.make_atom(child, qn, connection)
+ except EmptyResultSet:
+ nothing_childs += 1
+ else:
+ if sql:
+ result.append(sql)
+ result_params.extend(params)
+ else:
+ if sql is None:
+                        # Skip empty children entirely.
+ non_empty_childs -= 1
+ continue
+ everything_childs += 1
+            # Check if this node matches nothing or everything. First, work
+            # out how many full and empty child nodes would be needed to make
+            # this node full or empty.
+ if self.connector == AND:
+ full_needed, empty_needed = non_empty_childs, 1
+ else:
+ full_needed, empty_needed = 1, non_empty_childs
+ # Now, check if this node is full/empty using the
+ # counts.
+ if empty_needed - nothing_childs <= 0:
+ if self.negated:
+ return '', []
+ else:
+ raise EmptyResultSet
+ if full_needed - everything_childs <= 0:
+ if self.negated:
+ raise EmptyResultSet
+ else:
+ return '', []
+
+ if non_empty_childs == 0:
+ # All the child nodes were empty, so this one is empty, too.
+ return None, []
+ conn = ' %s ' % self.connector
+ sql_string = conn.join(result)
+ if sql_string:
+ if self.negated:
+ # Some backends (Oracle at least) need parentheses
+ # around the inner SQL in the negated case, even if the
+ # inner SQL contains just a single expression.
+ sql_string = 'NOT (%s)' % sql_string
+ elif len(result) > 1:
+ sql_string = '(%s)' % sql_string
+ return sql_string, result_params
+
+ def get_cols(self):
+ cols = []
+ for child in self.children:
+ if hasattr(child, 'get_cols'):
+ cols.extend(child.get_cols())
+ else:
+ if isinstance(child[0], Constraint):
+ cols.append((child[0].alias, child[0].col))
+ if hasattr(child[3], 'get_cols'):
+ cols.extend(child[3].get_cols())
+ return cols
+
+ def make_atom(self, child, qn, connection):
+ """
+ Turn a tuple (Constraint(table_alias, column_name, db_type),
+ lookup_type, value_annotation, params) into valid SQL.
+
+ The first item of the tuple may also be an Aggregate.
+
+ Returns the string for the SQL fragment and the parameters to use for
+ it.
+ """
+ lvalue, lookup_type, value_annotation, params_or_value = child
+ field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None
+
+ if isinstance(lvalue, Constraint):
+ try:
+ lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
+ except EmptyShortCircuit:
+ raise EmptyResultSet
+ elif isinstance(lvalue, Aggregate):
+ params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
+ else:
+ raise TypeError("'make_atom' expects a Constraint or an Aggregate "
+ "as the first item of its 'child' argument.")
+
+ if isinstance(lvalue, tuple):
+ # A direct database column lookup.
+ field_sql, field_params = self.sql_for_columns(lvalue, qn, connection, field_internal_type), []
+ else:
+ # A smart object with an as_sql() method.
+ field_sql, field_params = lvalue.as_sql(qn, connection)
+
+ is_datetime_field = value_annotation is datetime.datetime
+ cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s'
+
+ if hasattr(params, 'as_sql'):
+ extra, params = params.as_sql(qn, connection)
+ cast_sql = ''
+ else:
+ extra = ''
+
+ params = field_params + params
+
+ if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
+ and connection.features.interprets_empty_strings_as_nulls):
+ lookup_type = 'isnull'
+ value_annotation = True
+
+ if lookup_type in connection.operators:
+ format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),)
+ return (format % (field_sql,
+ connection.operators[lookup_type] % cast_sql,
+ extra), params)
+
+ if lookup_type == 'in':
+ if not value_annotation:
+ raise EmptyResultSet
+ if extra:
+ return ('%s IN %s' % (field_sql, extra), params)
+ max_in_list_size = connection.ops.max_in_list_size()
+ if max_in_list_size and len(params) > max_in_list_size:
+ # Break up the params list into an OR of manageable chunks.
+ in_clause_elements = ['(']
+ for offset in xrange(0, len(params), max_in_list_size):
+ if offset > 0:
+ in_clause_elements.append(' OR ')
+ in_clause_elements.append('%s IN (' % field_sql)
+ group_size = min(len(params) - offset, max_in_list_size)
+ param_group = ', '.join(repeat('%s', group_size))
+ in_clause_elements.append(param_group)
+ in_clause_elements.append(')')
+ in_clause_elements.append(')')
+ return ''.join(in_clause_elements), params
+ else:
+ return ('%s IN (%s)' % (field_sql,
+ ', '.join(repeat('%s', len(params)))),
+ params)
+ elif lookup_type in ('range', 'year'):
+ return ('%s BETWEEN %%s and %%s' % field_sql, params)
+ elif is_datetime_field and lookup_type in ('month', 'day', 'week_day',
+ 'hour', 'minute', 'second'):
+ tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
+ sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname)
+ return ('%s = %%s' % sql, tz_params + params)
+ elif lookup_type in ('month', 'day', 'week_day'):
+ return ('%s = %%s'
+ % connection.ops.date_extract_sql(lookup_type, field_sql), params)
+ elif lookup_type == 'isnull':
+ assert value_annotation in (True, False), "Invalid value_annotation for isnull"
+ return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ())
+ elif lookup_type == 'search':
+ return (connection.ops.fulltext_search_sql(field_sql), params)
+ elif lookup_type in ('regex', 'iregex'):
+ return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
+
+ raise TypeError('Invalid lookup_type: %r' % lookup_type)
+
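+    # Illustration of the 'in' chunking above, assuming a backend whose
+    # max_in_list_size is 1000 (as on Oracle): 2500 parameters produce
+    #     (col IN (<1000 placeholders>) OR col IN (<1000>) OR col IN (<500>))
+    # i.e. chunks of at most 1000 placeholders OR-ed together.
+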
+ def sql_for_columns(self, data, qn, connection, internal_type=None):
+ """
+ Returns the SQL fragment used for the left-hand side of a column
+ constraint (for example, the "T1.foo" portion in the clause
+ "WHERE ... T1.foo = 6") and a list of parameters.
+ """
+ table_alias, name, db_type = data
+ if table_alias:
+ lhs = '%s.%s' % (qn(table_alias), qn(name))
+ else:
+ lhs = qn(name)
+ return connection.ops.field_cast_sql(db_type, internal_type) % lhs
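+
+    # Illustration: with data == ('T1', 'foo', None), a double-quoting qn and
+    # the default field_cast_sql (plain '%s' on most backends), this returns
+    # '"T1"."foo"'.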
+
+ def relabel_aliases(self, change_map):
+ """
+ Relabels the alias values of any children. 'change_map' is a dictionary
+ mapping old (current) alias values to the new values.
+ """
+ for pos, child in enumerate(self.children):
+ if hasattr(child, 'relabel_aliases'):
+ # For example another WhereNode
+ child.relabel_aliases(change_map)
+ elif isinstance(child, (list, tuple)):
+ # tuple starting with Constraint
+ child = (child[0].relabeled_clone(change_map),) + child[1:]
+ if hasattr(child[3], 'relabeled_clone'):
+ child = (child[0], child[1], child[2]) + (
+ child[3].relabeled_clone(change_map),)
+ self.children[pos] = child
+
+ def clone(self):
+ """
+ Creates a clone of the tree. Must only be called on root nodes (nodes
+        with empty subtree_parents). Children must be either (Constraint,
+        lookup, value) tuples, or objects supporting .clone().
+ """
+ clone = self.__class__._new_instance(
+ children=[], connector=self.connector, negated=self.negated)
+ for child in self.children:
+ if hasattr(child, 'clone'):
+ clone.children.append(child.clone())
+ else:
+ clone.children.append(child)
+ return clone
+
+class EmptyWhere(WhereNode):
+
+ def add(self, data, connector):
+ return
+
+ def as_sql(self, qn=None, connection=None):
+ raise EmptyResultSet
+
+class EverythingNode(object):
+ """
+ A node that matches everything.
+ """
+
+ def as_sql(self, qn=None, connection=None):
+ return '', []
+
+
+class NothingNode(object):
+ """
+ A node that matches nothing.
+ """
+ def as_sql(self, qn=None, connection=None):
+ raise EmptyResultSet
+
+
+class ExtraWhere(object):
+ def __init__(self, sqls, params):
+ self.sqls = sqls
+ self.params = params
+
+ def as_sql(self, qn=None, connection=None):
+ sqls = ["(%s)" % sql for sql in self.sqls]
+ return " AND ".join(sqls), list(self.params or ())
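+
+    # For example:
+    #
+    #   >>> ExtraWhere(['foo = %s', 'bar > %s'], [1, 2]).as_sql()
+    #   ('(foo = %s) AND (bar > %s)', [1, 2])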
+
+
+class Constraint(object):
+ """
+ An object that can be passed to WhereNode.add() and knows how to
+ pre-process itself prior to including in the WhereNode.
+ """
+ def __init__(self, alias, col, field):
+ self.alias, self.col, self.field = alias, col, field
+
+ def prepare(self, lookup_type, value):
+ if self.field:
+ return self.field.get_prep_lookup(lookup_type, value)
+ return value
+
+ def process(self, lookup_type, value, connection):
+ """
+ Returns a tuple of data suitable for inclusion in a WhereNode
+ instance.
+ """
+ # Because of circular imports, we need to import this here.
+ from django.db.models.base import ObjectDoesNotExist
+ try:
+ if self.field:
+ params = self.field.get_db_prep_lookup(lookup_type, value,
+ connection=connection, prepared=True)
+ db_type = self.field.db_type(connection=connection)
+ else:
+ # This branch is used at times when we add a comparison to NULL
+ # (we don't really want to waste time looking up the associated
+ # field object at the calling location).
+ params = Field().get_db_prep_lookup(lookup_type, value,
+ connection=connection, prepared=True)
+ db_type = None
+ except ObjectDoesNotExist:
+ raise EmptyShortCircuit
+
+ return (self.alias, self.col, db_type), params
+
+ def relabeled_clone(self, change_map):
+ if self.alias not in change_map:
+ return self
+ else:
+ new = Empty()
+ new.__class__ = self.__class__
+ new.alias, new.col, new.field = change_map[self.alias], self.col, self.field
+ return new
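+
+    # Illustration: with change_map == {'T1': 'T2'}, a Constraint whose alias
+    # is 'T1' comes back as a copy with alias 'T2'; one whose alias is not in
+    # the map is returned unchanged (self), avoiding a needless copy.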
+
+class SubqueryConstraint(object):
+ def __init__(self, alias, columns, targets, query_object):
+ self.alias = alias
+ self.columns = columns
+ self.targets = targets
+ self.query_object = query_object
+
+ def as_sql(self, qn, connection):
+ query = self.query_object
+
+        # A QuerySet was passed in as the query value.
+ if hasattr(query, 'values'):
+ if query._db and connection.alias != query._db:
+ raise ValueError("Can't do subqueries with queries on different DBs.")
+ # Do not override already existing values.
+ if not hasattr(query, 'field_names'):
+ query = query.values(*self.targets)
+ else:
+ query = query._clone()
+ query = query.query
+ query.clear_ordering(True)
+
+ query_compiler = query.get_compiler(connection=connection)
+ return query_compiler.as_subquery_condition(self.alias, self.columns, qn)
+
+ def relabel_aliases(self, change_map):
+ self.alias = change_map.get(self.alias, self.alias)
+
+ def clone(self):
+ return self.__class__(
+ self.alias, self.columns, self.targets,
+ self.query_object)