Diffstat (limited to 'lib/python2.7/site-packages/django/db/models')
27 files changed, 12429 insertions, 0 deletions
diff --git a/lib/python2.7/site-packages/django/db/models/__init__.py b/lib/python2.7/site-packages/django/db/models/__init__.py new file mode 100644 index 0000000..b5dd1a5 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/__init__.py @@ -0,0 +1,33 @@ +from functools import wraps + +from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured +from django.db.models.loading import get_apps, get_app_paths, get_app, get_models, get_model, register_models, UnavailableApp +from django.db.models.query import Q +from django.db.models.expressions import F +from django.db.models.manager import Manager +from django.db.models.base import Model +from django.db.models.aggregates import * +from django.db.models.fields import * +from django.db.models.fields.subclassing import SubfieldBase +from django.db.models.fields.files import FileField, ImageField +from django.db.models.fields.related import ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel +from django.db.models.deletion import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING, ProtectedError +from django.db.models import signals + + +def permalink(func): + """ + Decorator that calls urlresolvers.reverse() to return a URL using + parameters returned by the decorated function "func". + + "func" should be a function that returns a tuple in one of the + following formats: + (viewname, viewargs) + (viewname, viewargs, viewkwargs) + """ + from django.core.urlresolvers import reverse + @wraps(func) + def inner(*args, **kwargs): + bits = func(*args, **kwargs) + return reverse(bits[0], None, *bits[1:3]) + return inner diff --git a/lib/python2.7/site-packages/django/db/models/aggregates.py b/lib/python2.7/site-packages/django/db/models/aggregates.py new file mode 100644 index 0000000..b89db1c --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/aggregates.py @@ -0,0 +1,80 @@ +""" +Classes to represent the definitions of aggregate functions. +""" +from django.db.models.constants import LOOKUP_SEP + +def refs_aggregate(lookup_parts, aggregates): + """ + A little helper method to check if the lookup_parts contains references + to the given aggregates set. Because the LOOKUP_SEP is contained in the + default annotation names we must check each prefix of the lookup_parts + for match. + """ + for i in range(len(lookup_parts) + 1): + if LOOKUP_SEP.join(lookup_parts[0:i]) in aggregates: + return True + return False + +class Aggregate(object): + """ + Default Aggregate definition. + """ + def __init__(self, lookup, **extra): + """Instantiate a new aggregate. + + * lookup is the field on which the aggregate operates. + * extra is a dictionary of additional data to provide for the + aggregate definition + + Also utilizes the class variables: + * name, the identifier for this aggregate function. + """ + self.lookup = lookup + self.extra = extra + + def _default_alias(self): + return '%s__%s' % (self.lookup, self.name.lower()) + default_alias = property(_default_alias) + + def add_to_query(self, query, alias, col, source, is_summary): + """Add the aggregate to the nominated query. + + This method is used to convert the generic Aggregate definition into a + backend-specific definition. + + * query is the backend-specific query instance to which the aggregate + is to be added. + * col is a column reference describing the subject field + of the aggregate. It can be an alias, or a tuple describing + a table and column name. 
+ * source is the underlying field or aggregate definition for + the column reference. If the aggregate is not an ordinal or + computed type, this reference is used to determine the coerced + output type of the aggregate. + * is_summary is a boolean that is set True if the aggregate is a + summary value rather than an annotation. + """ + klass = getattr(query.aggregates_module, self.name) + aggregate = klass(col, source=source, is_summary=is_summary, **self.extra) + query.aggregates[alias] = aggregate + +class Avg(Aggregate): + name = 'Avg' + +class Count(Aggregate): + name = 'Count' + +class Max(Aggregate): + name = 'Max' + +class Min(Aggregate): + name = 'Min' + +class StdDev(Aggregate): + name = 'StdDev' + +class Sum(Aggregate): + name = 'Sum' + +class Variance(Aggregate): + name = 'Variance' diff --git a/lib/python2.7/site-packages/django/db/models/base.py b/lib/python2.7/site-packages/django/db/models/base.py new file mode 100644 index 0000000..f6001b4 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/base.py @@ -0,0 +1,1042 @@ +from __future__ import unicode_literals + +import copy +import sys +from functools import update_wrapper +from django.utils.six.moves import zip + +import django.db.models.manager # Imported to register signal handler. +from django.conf import settings +from django.core.exceptions import (ObjectDoesNotExist, + MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS) +from django.db.models.fields import AutoField, FieldDoesNotExist +from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel, + OneToOneField, add_lazy_relation) +from django.db import (router, transaction, DatabaseError, + DEFAULT_DB_ALIAS) +from django.db.models.query import Q +from django.db.models.query_utils import DeferredAttribute, deferred_class_factory +from django.db.models.deletion import Collector +from django.db.models.options import Options +from django.db.models import signals +from django.db.models.loading import register_models, get_model +from django.utils.translation import ugettext_lazy as _ +from django.utils.functional import curry +from django.utils.encoding import force_str, force_text +from django.utils import six +from django.utils.text import get_text_list, capfirst + + +def subclass_exception(name, parents, module, attached_to=None): + """ + Create exception subclass. Used by ModelBase below. + + If 'attached_to' is supplied, the exception will be created in a way that + allows it to be pickled, assuming the returned exception class will be added + as an attribute to the 'attached_to' class. + """ + class_dict = {'__module__': module} + if attached_to is not None: + def __reduce__(self): + # Exceptions are special - they've got state that isn't + # in self.__dict__. We assume it is all in self.args. + return (unpickle_inner_exception, (attached_to, name), self.args) + + def __setstate__(self, args): + self.args = args + + class_dict['__reduce__'] = __reduce__ + class_dict['__setstate__'] = __setstate__ + + return type(name, parents, class_dict) + + +class ModelBase(type): + """ + Metaclass for all models. + """ + def __new__(cls, name, bases, attrs): + super_new = super(ModelBase, cls).__new__ + + # six.with_metaclass() inserts an extra class called 'NewBase' in the + # inheritance tree: Model -> NewBase -> object. But the initialization + # should be executed only once for a given model class. + + # attrs will never be empty for classes declared in the standard way + # (ie. with the `class` keyword). This is quite robust. 
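+        # Illustrative note: six's with_metaclass() here behaves roughly as
+        # ``meta("NewBase", bases, {})``, so this metaclass fires once for
+        # that throwaway class before it runs for any real model; the check
+        # below short-circuits that first call.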
+ if name == 'NewBase' and attrs == {}: + return super_new(cls, name, bases, attrs) + + # Also ensure initialization is only performed for subclasses of Model + # (excluding Model class itself). + parents = [b for b in bases if isinstance(b, ModelBase) and + not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))] + if not parents: + return super_new(cls, name, bases, attrs) + + # Create the class. + module = attrs.pop('__module__') + new_class = super_new(cls, name, bases, {'__module__': module}) + attr_meta = attrs.pop('Meta', None) + abstract = getattr(attr_meta, 'abstract', False) + if not attr_meta: + meta = getattr(new_class, 'Meta', None) + else: + meta = attr_meta + base_meta = getattr(new_class, '_meta', None) + + if getattr(meta, 'app_label', None) is None: + # Figure out the app_label by looking one level up. + # For 'django.contrib.sites.models', this would be 'sites'. + model_module = sys.modules[new_class.__module__] + kwargs = {"app_label": model_module.__name__.split('.')[-2]} + else: + kwargs = {} + + new_class.add_to_class('_meta', Options(meta, **kwargs)) + if not abstract: + new_class.add_to_class('DoesNotExist', subclass_exception(str('DoesNotExist'), + tuple(x.DoesNotExist + for x in parents if hasattr(x, '_meta') and not x._meta.abstract) + or (ObjectDoesNotExist,), + module, attached_to=new_class)) + new_class.add_to_class('MultipleObjectsReturned', subclass_exception(str('MultipleObjectsReturned'), + tuple(x.MultipleObjectsReturned + for x in parents if hasattr(x, '_meta') and not x._meta.abstract) + or (MultipleObjectsReturned,), + module, attached_to=new_class)) + if base_meta and not base_meta.abstract: + # Non-abstract child classes inherit some attributes from their + # non-abstract parent (unless an ABC comes before it in the + # method resolution order). + if not hasattr(meta, 'ordering'): + new_class._meta.ordering = base_meta.ordering + if not hasattr(meta, 'get_latest_by'): + new_class._meta.get_latest_by = base_meta.get_latest_by + + is_proxy = new_class._meta.proxy + + # If the model is a proxy, ensure that the base class + # hasn't been swapped out. + if is_proxy and base_meta and base_meta.swapped: + raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) + + if getattr(new_class, '_default_manager', None): + if not is_proxy: + # Multi-table inheritance doesn't inherit default manager from + # parents. + new_class._default_manager = None + new_class._base_manager = None + else: + # Proxy classes do inherit parent's default manager, if none is + # set explicitly. + new_class._default_manager = new_class._default_manager._copy_to_model(new_class) + new_class._base_manager = new_class._base_manager._copy_to_model(new_class) + + # Bail out early if we have already created this class. + m = get_model(new_class._meta.app_label, name, + seed_cache=False, only_installed=False) + if m is not None: + return m + + # Add all attributes to the class. + for obj_name, obj in attrs.items(): + new_class.add_to_class(obj_name, obj) + + # All the fields of any type declared on this model + new_fields = new_class._meta.local_fields + \ + new_class._meta.local_many_to_many + \ + new_class._meta.virtual_fields + field_names = set([f.name for f in new_fields]) + + # Basic setup for proxy models. + if is_proxy: + base = None + for parent in [cls for cls in parents if hasattr(cls, '_meta')]: + if parent._meta.abstract: + if parent._meta.fields: + raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." 
% name) + else: + continue + if base is not None: + raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) + else: + base = parent + if base is None: + raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) + if (new_class._meta.local_fields or + new_class._meta.local_many_to_many): + raise FieldError("Proxy model '%s' contains model fields." % name) + new_class._meta.setup_proxy(base) + new_class._meta.concrete_model = base._meta.concrete_model + else: + new_class._meta.concrete_model = new_class + + # Do the appropriate setup for any model parents. + o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields + if isinstance(f, OneToOneField)]) + + for base in parents: + original_base = base + if not hasattr(base, '_meta'): + # Things without _meta aren't functional models, so they're + # uninteresting parents. + continue + + parent_fields = base._meta.local_fields + base._meta.local_many_to_many + # Check for clashes between locally declared fields and those + # on the base classes (we cannot handle shadowed fields at the + # moment). + for field in parent_fields: + if field.name in field_names: + raise FieldError('Local field %r in class %r clashes ' + 'with field of similar name from ' + 'base class %r' % + (field.name, name, base.__name__)) + if not base._meta.abstract: + # Concrete classes... + base = base._meta.concrete_model + if base in o2o_map: + field = o2o_map[base] + elif not is_proxy: + attr_name = '%s_ptr' % base._meta.model_name + field = OneToOneField(base, name=attr_name, + auto_created=True, parent_link=True) + new_class.add_to_class(attr_name, field) + else: + field = None + new_class._meta.parents[base] = field + else: + # .. and abstract ones. + for field in parent_fields: + new_class.add_to_class(field.name, copy.deepcopy(field)) + + # Pass any non-abstract parent classes onto child. + new_class._meta.parents.update(base._meta.parents) + + # Inherit managers from the abstract base classes. + new_class.copy_managers(base._meta.abstract_managers) + + # Proxy models inherit the non-abstract managers from their base, + # unless they have redefined any of them. + if is_proxy: + new_class.copy_managers(original_base._meta.concrete_managers) + + # Inherit virtual fields (like GenericForeignKey) from the parent + # class + for field in base._meta.virtual_fields: + if base._meta.abstract and field.name in field_names: + raise FieldError('Local field %r in class %r clashes '\ + 'with field of similar name from '\ + 'abstract base class %r' % \ + (field.name, name, base.__name__)) + new_class.add_to_class(field.name, copy.deepcopy(field)) + + if abstract: + # Abstract base models can't be instantiated and don't appear in + # the list of models for an app. We do the final setup for them a + # little differently from normal models. + attr_meta.abstract = False + new_class.Meta = attr_meta + return new_class + + new_class._prepare() + register_models(new_class._meta.app_label, new_class) + + # Because of the way imports happen (recursively), we may or may not be + # the first time this model tries to register with the framework. There + # should only be one class for each model, so we always return the + # registered version. + return get_model(new_class._meta.app_label, name, + seed_cache=False, only_installed=False) + + def copy_managers(cls, base_managers): + # This is in-place sorting of an Options attribute, but that's fine. 
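+        # Each entry is a (creation_counter, manager_name, manager) tuple --
+        # see the unpacking below -- so sorting restores declaration order.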
+ base_managers.sort() + for _, mgr_name, manager in base_managers: + val = getattr(cls, mgr_name, None) + if not val or val is manager: + new_manager = manager._copy_to_model(cls) + cls.add_to_class(mgr_name, new_manager) + + def add_to_class(cls, name, value): + if hasattr(value, 'contribute_to_class'): + value.contribute_to_class(cls, name) + else: + setattr(cls, name, value) + + def _prepare(cls): + """ + Creates some methods once self._meta has been populated. + """ + opts = cls._meta + opts._prepare(cls) + + if opts.order_with_respect_to: + cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True) + cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False) + + # defer creating accessors on the foreign class until we are + # certain it has been created + def make_foreign_order_accessors(field, model, cls): + setattr( + field.rel.to, + 'get_%s_order' % cls.__name__.lower(), + curry(method_get_order, cls) + ) + setattr( + field.rel.to, + 'set_%s_order' % cls.__name__.lower(), + curry(method_set_order, cls) + ) + add_lazy_relation( + cls, + opts.order_with_respect_to, + opts.order_with_respect_to.rel.to, + make_foreign_order_accessors + ) + + # Give the class a docstring -- its definition. + if cls.__doc__ is None: + cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields])) + + if hasattr(cls, 'get_absolute_url'): + cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url), + cls.get_absolute_url) + + signals.class_prepared.send(sender=cls) + + +class ModelState(object): + """ + A class for storing instance state + """ + def __init__(self, db=None): + self.db = db + # If true, uniqueness validation checks will consider this a new, as-yet-unsaved object. + # Necessary for correct validation of new instances of objects with explicit (non-auto) PKs. + # This impacts validation only; it has no effect on the actual save. + self.adding = True + + +class Model(six.with_metaclass(ModelBase)): + _deferred = False + + def __init__(self, *args, **kwargs): + signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs) + + # Set up the storage for instance state + self._state = ModelState() + + # There is a rather weird disparity here; if kwargs, it's set, then args + # overrides it. It should be one or the other; don't duplicate the work + # The reason for the kwargs check is that standard iterator passes in by + # args, and instantiation for iteration is 33% faster. + args_len = len(args) + if args_len > len(self._meta.concrete_fields): + # Daft, but matches old exception sans the err msg. + raise IndexError("Number of args exceeds number of fields") + + if not kwargs: + fields_iter = iter(self._meta.concrete_fields) + # The ordering of the zip calls matter - zip throws StopIteration + # when an iter throws it. So if the first iter throws it, the second + # is *not* consumed. We rely on this, so don't change the order + # without changing the logic. + for val, field in zip(args, fields_iter): + setattr(self, field.attname, val) + else: + # Slower, kwargs-ready version. + fields_iter = iter(self._meta.fields) + for val, field in zip(args, fields_iter): + setattr(self, field.attname, val) + kwargs.pop(field.name, None) + # Maintain compatibility with existing calls. + if isinstance(field.rel, ManyToOneRel): + kwargs.pop(field.attname, None) + + # Now we're left with the unprocessed fields that *must* come from + # keywords, or default. 
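+        # e.g. for a hypothetical Article(headline='Hi'), the loop below pops
+        # 'headline' from kwargs and falls back to field.get_default() for
+        # every remaining field, such as an unset pub_date.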
+ + for field in fields_iter: + is_related_object = False + # This slightly odd construct is so that we can access any + # data-descriptor object (DeferredAttribute) without triggering its + # __get__ method. + if (field.attname not in kwargs and + (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute) + or field.column is None)): + # This field will be populated on request. + continue + if kwargs: + if isinstance(field.rel, ForeignObjectRel): + try: + # Assume object instance was passed in. + rel_obj = kwargs.pop(field.name) + is_related_object = True + except KeyError: + try: + # Object instance wasn't passed in -- must be an ID. + val = kwargs.pop(field.attname) + except KeyError: + val = field.get_default() + else: + # Object instance was passed in. Special case: You can + # pass in "None" for related objects if it's allowed. + if rel_obj is None and field.null: + val = None + else: + try: + val = kwargs.pop(field.attname) + except KeyError: + # This is done with an exception rather than the + # default argument on pop because we don't want + # get_default() to be evaluated, and then not used. + # Refs #12057. + val = field.get_default() + else: + val = field.get_default() + + if is_related_object: + # If we are passed a related instance, set it using the + # field.name instead of field.attname (e.g. "user" instead of + # "user_id") so that the object gets properly cached (and type + # checked) by the RelatedObjectDescriptor. + setattr(self, field.name, rel_obj) + else: + setattr(self, field.attname, val) + + if kwargs: + for prop in list(kwargs): + try: + if isinstance(getattr(self.__class__, prop), property): + setattr(self, prop, kwargs.pop(prop)) + except AttributeError: + pass + if kwargs: + raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0]) + super(Model, self).__init__() + signals.post_init.send(sender=self.__class__, instance=self) + + def __repr__(self): + try: + u = six.text_type(self) + except (UnicodeEncodeError, UnicodeDecodeError): + u = '[Bad Unicode data]' + return force_str('<%s: %s>' % (self.__class__.__name__, u)) + + def __str__(self): + if six.PY2 and hasattr(self, '__unicode__'): + return force_text(self).encode('utf-8') + return '%s object' % self.__class__.__name__ + + def __eq__(self, other): + return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val() + + def __ne__(self, other): + return not self.__eq__(other) + + def __hash__(self): + return hash(self._get_pk_val()) + + def __reduce__(self): + """ + Provides pickling support. Normally, this just dispatches to Python's + standard handling. However, for models with deferred field loading, we + need to do things manually, as they're dynamically created classes and + only module-level classes can be pickled by the default path. 
+ """ + data = self.__dict__ + if not self._deferred: + class_id = self._meta.app_label, self._meta.object_name + return model_unpickle, (class_id, [], simple_class_factory), data + defers = [] + for field in self._meta.fields: + if isinstance(self.__class__.__dict__.get(field.attname), + DeferredAttribute): + defers.append(field.attname) + model = self._meta.proxy_for_model + class_id = model._meta.app_label, model._meta.object_name + return (model_unpickle, (class_id, defers, deferred_class_factory), data) + + def _get_pk_val(self, meta=None): + if not meta: + meta = self._meta + return getattr(self, meta.pk.attname) + + def _set_pk_val(self, value): + return setattr(self, self._meta.pk.attname, value) + + pk = property(_get_pk_val, _set_pk_val) + + def serializable_value(self, field_name): + """ + Returns the value of the field name for this instance. If the field is + a foreign key, returns the id value, instead of the object. If there's + no Field object with this name on the model, the model attribute's + value is returned directly. + + Used to serialize a field's value (in the serializer, or form output, + for example). Normally, you would just access the attribute directly + and not use this method. + """ + try: + field = self._meta.get_field_by_name(field_name)[0] + except FieldDoesNotExist: + return getattr(self, field_name) + return getattr(self, field.attname) + + def save(self, force_insert=False, force_update=False, using=None, + update_fields=None): + """ + Saves the current instance. Override this in a subclass if you want to + control the saving process. + + The 'force_insert' and 'force_update' parameters can be used to insist + that the "save" must be an SQL insert or update (or equivalent for + non-SQL backends), respectively. Normally, they should not be set. + """ + using = using or router.db_for_write(self.__class__, instance=self) + if force_insert and (force_update or update_fields): + raise ValueError("Cannot force both insert and updating in model saving.") + + if update_fields is not None: + # If update_fields is empty, skip the save. We do also check for + # no-op saves later on for inheritance cases. This bailout is + # still needed for skipping signal sending. + if len(update_fields) == 0: + return + + update_fields = frozenset(update_fields) + field_names = set() + + for field in self._meta.fields: + if not field.primary_key: + field_names.add(field.name) + + if field.name != field.attname: + field_names.add(field.attname) + + non_model_fields = update_fields.difference(field_names) + + if non_model_fields: + raise ValueError("The following fields do not exist in this " + "model or are m2m fields: %s" + % ', '.join(non_model_fields)) + + # If saving to the same database, and this model is deferred, then + # automatically do a "update_fields" save on the loaded fields. 
+ elif not force_insert and self._deferred and using == self._state.db: + field_names = set() + for field in self._meta.concrete_fields: + if not field.primary_key and not hasattr(field, 'through'): + field_names.add(field.attname) + deferred_fields = [ + f.attname for f in self._meta.fields + if f.attname not in self.__dict__ + and isinstance(self.__class__.__dict__[f.attname], + DeferredAttribute)] + + loaded_fields = field_names.difference(deferred_fields) + if loaded_fields: + update_fields = frozenset(loaded_fields) + + self.save_base(using=using, force_insert=force_insert, + force_update=force_update, update_fields=update_fields) + save.alters_data = True + + def save_base(self, raw=False, force_insert=False, + force_update=False, using=None, update_fields=None): + """ + Handles the parts of saving which should be done only once per save, + yet need to be done in raw saves, too. This includes some sanity + checks and signal sending. + + The 'raw' argument is telling save_base not to save any parent + models and not to do any changes to the values before save. This + is used by fixture loading. + """ + using = using or router.db_for_write(self.__class__, instance=self) + assert not (force_insert and (force_update or update_fields)) + assert update_fields is None or len(update_fields) > 0 + cls = origin = self.__class__ + # Skip proxies, but keep the origin as the proxy model. + if cls._meta.proxy: + cls = cls._meta.concrete_model + meta = cls._meta + if not meta.auto_created: + signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using, + update_fields=update_fields) + with transaction.commit_on_success_unless_managed(using=using, savepoint=False): + if not raw: + self._save_parents(cls, using, update_fields) + updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields) + # Store the database on which the object was saved + self._state.db = using + # Once saved, this is no longer a to-be-added instance. + self._state.adding = False + + # Signal that the save is complete + if not meta.auto_created: + signals.post_save.send(sender=origin, instance=self, created=(not updated), + update_fields=update_fields, raw=raw, using=using) + + save_base.alters_data = True + + def _save_parents(self, cls, using, update_fields): + """ + Saves all the parents of cls using values from self. + """ + meta = cls._meta + for parent, field in meta.parents.items(): + # Make sure the link fields are synced between parent and self. + if (field and getattr(self, parent._meta.pk.attname) is None + and getattr(self, field.attname) is not None): + setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) + self._save_parents(cls=parent, using=using, update_fields=update_fields) + self._save_table(cls=parent, using=using, update_fields=update_fields) + # Set the parent's PK value to self. + if field: + setattr(self, field.attname, self._get_pk_val(parent._meta)) + # Since we didn't have an instance of the parent handy set + # attname directly, bypassing the descriptor. Invalidate + # the related object cache, in case it's been accidentally + # populated. A fresh instance will be re-built from the + # database if necessary. + cache_name = field.get_cache_name() + if hasattr(self, cache_name): + delattr(self, cache_name) + + def _save_table(self, raw=False, cls=None, force_insert=False, + force_update=False, using=None, update_fields=None): + """ + Does the heavy-lifting involved in saving. Updates or inserts the data + for a single table. 
+ """ + meta = cls._meta + non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] + + if update_fields: + non_pks = [f for f in non_pks + if f.name in update_fields or f.attname in update_fields] + + pk_val = self._get_pk_val(meta) + pk_set = pk_val is not None + if not pk_set and (force_update or update_fields): + raise ValueError("Cannot force an update in save() with no primary key.") + updated = False + # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. + if pk_set and not force_insert: + base_qs = cls._base_manager.using(using) + values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) + for f in non_pks] + forced_update = update_fields or force_update + updated = self._do_update(base_qs, using, pk_val, values, update_fields, + forced_update) + if force_update and not updated: + raise DatabaseError("Forced update did not affect any rows.") + if update_fields and not updated: + raise DatabaseError("Save with update_fields did not affect any rows.") + if not updated: + if meta.order_with_respect_to: + # If this is a model with an order_with_respect_to + # autopopulate the _order field + field = meta.order_with_respect_to + order_value = cls._base_manager.using(using).filter( + **{field.name: getattr(self, field.attname)}).count() + self._order = order_value + + fields = meta.local_concrete_fields + if not pk_set: + fields = [f for f in fields if not isinstance(f, AutoField)] + + update_pk = bool(meta.has_auto_field and not pk_set) + result = self._do_insert(cls._base_manager, using, fields, update_pk, raw) + if update_pk: + setattr(self, meta.pk.attname, result) + return updated + + def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): + """ + This method will try to update the model. If the model was updated (in + the sense that an update query was done and a matching row was found + from the DB) the method will return True. + """ + filtered = base_qs.filter(pk=pk_val) + if not values: + # We can end up here when saving a model in inheritance chain where + # update_fields doesn't target any field in current model. In that + # case we just say the update succeeded. Another case ending up here + # is a model with just PK - in that case check that the PK still + # exists. + return update_fields is not None or filtered.exists() + if self._meta.select_on_save and not forced_update: + if filtered.exists(): + filtered._update(values) + return True + else: + return False + return filtered._update(values) > 0 + + def _do_insert(self, manager, using, fields, update_pk, raw): + """ + Do an INSERT. If update_pk is defined then this method should return + the new pk for the model. + """ + return manager._insert([self], fields=fields, return_id=update_pk, + using=using, raw=raw) + + def delete(self, using=None): + using = using or router.db_for_write(self.__class__, instance=self) + assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." 
% (self._meta.object_name, self._meta.pk.attname) + + collector = Collector(using=using) + collector.collect([self]) + collector.delete() + + delete.alters_data = True + + def _get_FIELD_display(self, field): + value = getattr(self, field.attname) + return force_text(dict(field.flatchoices).get(value, value), strings_only=True) + + def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): + if not self.pk: + raise ValueError("get_next/get_previous cannot be used on unsaved objects.") + op = 'gt' if is_next else 'lt' + order = '' if is_next else '-' + param = force_text(getattr(self, field.attname)) + q = Q(**{'%s__%s' % (field.name, op): param}) + q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) + qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order) + try: + return qs[0] + except IndexError: + raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) + + def _get_next_or_previous_in_order(self, is_next): + cachename = "__%s_order_cache" % is_next + if not hasattr(self, cachename): + op = 'gt' if is_next else 'lt' + order = '_order' if is_next else '-_order' + order_field = self._meta.order_with_respect_to + obj = self._default_manager.filter(**{ + order_field.name: getattr(self, order_field.attname) + }).filter(**{ + '_order__%s' % op: self._default_manager.values('_order').filter(**{ + self._meta.pk.name: self.pk + }) + }).order_by(order)[:1].get() + setattr(self, cachename, obj) + return getattr(self, cachename) + + def prepare_database_save(self, unused): + if self.pk is None: + raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) + return self.pk + + def clean(self): + """ + Hook for doing any extra model-wide validation after clean() has been + called on every field by self.clean_fields. Any ValidationError raised + by this method will not be associated with a particular field; it will + have a special-case association with the field defined by NON_FIELD_ERRORS. + """ + pass + + def validate_unique(self, exclude=None): + """ + Checks unique constraints on the model and raises ``ValidationError`` + if any failed. + """ + unique_checks, date_checks = self._get_unique_checks(exclude=exclude) + + errors = self._perform_unique_checks(unique_checks) + date_errors = self._perform_date_checks(date_checks) + + for k, v in date_errors.items(): + errors.setdefault(k, []).extend(v) + + if errors: + raise ValidationError(errors) + + def _get_unique_checks(self, exclude=None): + """ + Gather a list of checks to perform. Since validate_unique could be + called from a ModelForm, some fields may have been excluded; we can't + perform a unique check on a model that is missing fields involved + in that check. + Fields that did not validate should also be excluded, but they need + to be passed in via the exclude argument. + """ + if exclude is None: + exclude = [] + unique_checks = [] + + unique_togethers = [(self.__class__, self._meta.unique_together)] + for parent_class in self._meta.parents.keys(): + if parent_class._meta.unique_together: + unique_togethers.append((parent_class, parent_class._meta.unique_together)) + + for model_class, unique_together in unique_togethers: + for check in unique_together: + for name in check: + # If this is an excluded field, don't add this check. + if name in exclude: + break + else: + unique_checks.append((model_class, tuple(check))) + + # These are checks for the unique_for_<date/year/month>. 
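+        # e.g. a hypothetical SlugField(unique_for_date='pub_date') named
+        # 'slug' produces the entry (model_class, 'date', 'slug', 'pub_date').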
+ date_checks = [] + + # Gather a list of checks for fields declared as unique and add them to + # the list of checks. + + fields_with_class = [(self.__class__, self._meta.local_fields)] + for parent_class in self._meta.parents.keys(): + fields_with_class.append((parent_class, parent_class._meta.local_fields)) + + for model_class, fields in fields_with_class: + for f in fields: + name = f.name + if name in exclude: + continue + if f.unique: + unique_checks.append((model_class, (name,))) + if f.unique_for_date and f.unique_for_date not in exclude: + date_checks.append((model_class, 'date', name, f.unique_for_date)) + if f.unique_for_year and f.unique_for_year not in exclude: + date_checks.append((model_class, 'year', name, f.unique_for_year)) + if f.unique_for_month and f.unique_for_month not in exclude: + date_checks.append((model_class, 'month', name, f.unique_for_month)) + return unique_checks, date_checks + + def _perform_unique_checks(self, unique_checks): + errors = {} + + for model_class, unique_check in unique_checks: + # Try to look up an existing object with the same values as this + # object's values for all the unique field. + + lookup_kwargs = {} + for field_name in unique_check: + f = self._meta.get_field(field_name) + lookup_value = getattr(self, f.attname) + if lookup_value is None: + # no value, skip the lookup + continue + if f.primary_key and not self._state.adding: + # no need to check for unique primary key when editing + continue + lookup_kwargs[str(field_name)] = lookup_value + + # some fields were skipped, no reason to do the check + if len(unique_check) != len(lookup_kwargs): + continue + + qs = model_class._default_manager.filter(**lookup_kwargs) + + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + # Note that we need to use the pk as defined by model_class, not + # self.pk. These can be different fields because model inheritance + # allows single model to have effectively multiple primary keys. + # Refs #17615. 
+ model_class_pk = self._get_pk_val(model_class._meta) + if not self._state.adding and model_class_pk is not None: + qs = qs.exclude(pk=model_class_pk) + if qs.exists(): + if len(unique_check) == 1: + key = unique_check[0] + else: + key = NON_FIELD_ERRORS + errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) + + return errors + + def _perform_date_checks(self, date_checks): + errors = {} + for model_class, lookup_type, field, unique_for in date_checks: + lookup_kwargs = {} + # there's a ticket to add a date lookup, we can remove this special + # case if that makes it's way in + date = getattr(self, unique_for) + if date is None: + continue + if lookup_type == 'date': + lookup_kwargs['%s__day' % unique_for] = date.day + lookup_kwargs['%s__month' % unique_for] = date.month + lookup_kwargs['%s__year' % unique_for] = date.year + else: + lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) + lookup_kwargs[field] = getattr(self, field) + + qs = model_class._default_manager.filter(**lookup_kwargs) + # Exclude the current object from the query if we are editing an + # instance (as opposed to creating a new one) + if not self._state.adding and self.pk is not None: + qs = qs.exclude(pk=self.pk) + + if qs.exists(): + errors.setdefault(field, []).append( + self.date_error_message(lookup_type, field, unique_for) + ) + return errors + + def date_error_message(self, lookup_type, field, unique_for): + opts = self._meta + return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % { + 'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)), + 'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)), + 'lookup': lookup_type, + } + + def unique_error_message(self, model_class, unique_check): + opts = model_class._meta + model_name = capfirst(opts.verbose_name) + + # A unique field + if len(unique_check) == 1: + field_name = unique_check[0] + field = opts.get_field(field_name) + field_label = capfirst(field.verbose_name) + # Insert the error into the error dict, very sneaky + return field.error_messages['unique'] % { + 'model_name': six.text_type(model_name), + 'field_label': six.text_type(field_label) + } + # unique_together + else: + field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] + field_labels = get_text_list(field_labels, _('and')) + return _("%(model_name)s with this %(field_label)s already exists.") % { + 'model_name': six.text_type(model_name), + 'field_label': six.text_type(field_labels) + } + + def full_clean(self, exclude=None, validate_unique=True): + """ + Calls clean_fields, clean, and validate_unique, on the model, + and raises a ``ValidationError`` for any errors that occurred. + """ + errors = {} + if exclude is None: + exclude = [] + + try: + self.clean_fields(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Form.clean() is run even if other validation fails, so do the + # same with Model.clean() for consistency. + try: + self.clean() + except ValidationError as e: + errors = e.update_error_dict(errors) + + # Run unique checks, but only for fields that passed validation. 
+ if validate_unique: + for name in errors.keys(): + if name != NON_FIELD_ERRORS and name not in exclude: + exclude.append(name) + try: + self.validate_unique(exclude=exclude) + except ValidationError as e: + errors = e.update_error_dict(errors) + + if errors: + raise ValidationError(errors) + + def clean_fields(self, exclude=None): + """ + Cleans all fields and raises a ValidationError containing message_dict + of all validation errors if any occur. + """ + if exclude is None: + exclude = [] + + errors = {} + for f in self._meta.fields: + if f.name in exclude: + continue + # Skip validation for empty fields with blank=True. The developer + # is responsible for making sure they have a valid value. + raw_value = getattr(self, f.attname) + if f.blank and raw_value in f.empty_values: + continue + try: + setattr(self, f.attname, f.clean(raw_value, self)) + except ValidationError as e: + errors[f.name] = e.error_list + + if errors: + raise ValidationError(errors) + + +############################################ +# HELPER FUNCTIONS (CURRIED MODEL METHODS) # +############################################ + +# ORDERING METHODS ######################### + +def method_set_order(ordered_obj, self, id_list, using=None): + if using is None: + using = DEFAULT_DB_ALIAS + rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name) + order_name = ordered_obj._meta.order_with_respect_to.name + # FIXME: It would be nice if there was an "update many" version of update + # for situations like this. + with transaction.commit_on_success_unless_managed(using=using): + for i, j in enumerate(id_list): + ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i) + + +def method_get_order(ordered_obj, self): + rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name) + order_name = ordered_obj._meta.order_with_respect_to.name + pk_name = ordered_obj._meta.pk.name + return [r[pk_name] for r in + ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)] + + +############################################## +# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) # +############################################## + +def get_absolute_url(opts, func, self, *args, **kwargs): + return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.model_name), func)(self, *args, **kwargs) + + +######## +# MISC # +######## + +class Empty(object): + pass + +def simple_class_factory(model, attrs): + """ + Needed for dynamic classes. + """ + return model + +def model_unpickle(model_id, attrs, factory): + """ + Used to unpickle Model subclasses with deferred fields. + """ + if isinstance(model_id, tuple): + model = get_model(*model_id) + else: + # Backwards compat - the model was cached directly in earlier versions. + model = model_id + cls = factory(model, attrs) + return cls.__new__(cls) +model_unpickle.__safe_for_unpickle__ = True + + +def unpickle_inner_exception(klass, exception_name): + # Get the exception class from the class it is attached to: + exception = getattr(klass, exception_name) + return exception.__new__(exception) diff --git a/lib/python2.7/site-packages/django/db/models/constants.py b/lib/python2.7/site-packages/django/db/models/constants.py new file mode 100644 index 0000000..a7e6c25 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/constants.py @@ -0,0 +1,6 @@ +""" +Constants used across the ORM in general. +""" + +# Separator used to split filter strings apart. 
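+# e.g. the filter argument 'author__name__icontains' splits on LOOKUP_SEP
+# into ['author', 'name', 'icontains']: a relation, a field and a lookup.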
+LOOKUP_SEP = '__' diff --git a/lib/python2.7/site-packages/django/db/models/deletion.py b/lib/python2.7/site-packages/django/db/models/deletion.py new file mode 100644 index 0000000..e0bfb9d --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/deletion.py @@ -0,0 +1,292 @@ +from operator import attrgetter + +from django.db import connections, transaction, IntegrityError +from django.db.models import signals, sql +from django.utils.datastructures import SortedDict +from django.utils import six + + +class ProtectedError(IntegrityError): + def __init__(self, msg, protected_objects): + self.protected_objects = protected_objects + super(ProtectedError, self).__init__(msg, protected_objects) + + +def CASCADE(collector, field, sub_objs, using): + collector.collect(sub_objs, source=field.rel.to, + source_attr=field.name, nullable=field.null) + if field.null and not connections[using].features.can_defer_constraint_checks: + collector.add_field_update(field, None, sub_objs) + + +def PROTECT(collector, field, sub_objs, using): + raise ProtectedError("Cannot delete some instances of model '%s' because " + "they are referenced through a protected foreign key: '%s.%s'" % ( + field.rel.to.__name__, sub_objs[0].__class__.__name__, field.name + ), + sub_objs + ) + + +def SET(value): + if callable(value): + def set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value(), sub_objs) + else: + def set_on_delete(collector, field, sub_objs, using): + collector.add_field_update(field, value, sub_objs) + return set_on_delete + + +SET_NULL = SET(None) + + +def SET_DEFAULT(collector, field, sub_objs, using): + collector.add_field_update(field, field.get_default(), sub_objs) + + +def DO_NOTHING(collector, field, sub_objs, using): + pass + + +class Collector(object): + def __init__(self, using): + self.using = using + # Initially, {model: set([instances])}, later values become lists. + self.data = {} + self.field_updates = {} # {model: {(field, value): set([instances])}} + # fast_deletes is a list of queryset-likes that can be deleted without + # fetching the objects into memory. + self.fast_deletes = [] + + # Tracks deletion-order dependency for databases without transactions + # or ability to defer constraint checks. Only concrete model classes + # should be included, as the dependencies exist only between actual + # database tables; proxy models are represented here by their concrete + # parent. + self.dependencies = {} # {model: set([models])} + + def add(self, objs, source=None, nullable=False, reverse_dependency=False): + """ + Adds 'objs' to the collection of objects to be deleted. If the call is + the result of a cascade, 'source' should be the model that caused it, + and 'nullable' should be set to True if the relation can be null. + + Returns a list of all objects that were not already collected. + """ + if not objs: + return [] + new_objs = [] + model = objs[0].__class__ + instances = self.data.setdefault(model, set()) + for obj in objs: + if obj not in instances: + new_objs.append(obj) + instances.update(new_objs) + # Nullable relationships can be ignored -- they are nulled out before + # deleting, and therefore do not affect the order in which objects have + # to be deleted. 
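+        # (Field updates scheduled this way -- e.g. by SET_NULL -- are
+        # executed in delete() before any DELETE statements are issued.)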
+ if source is not None and not nullable: + if reverse_dependency: + source, model = model, source + self.dependencies.setdefault( + source._meta.concrete_model, set()).add(model._meta.concrete_model) + return new_objs + + def add_field_update(self, field, value, objs): + """ + Schedules a field update. 'objs' must be a homogenous iterable + collection of model instances (e.g. a QuerySet). + """ + if not objs: + return + model = objs[0].__class__ + self.field_updates.setdefault( + model, {}).setdefault( + (field, value), set()).update(objs) + + def can_fast_delete(self, objs, from_field=None): + """ + Determines if the objects in the given queryset-like can be + fast-deleted. This can be done if there are no cascades, no + parents and no signal listeners for the object class. + + The 'from_field' tells where we are coming from - we need this to + determine if the objects are in fact to be deleted. Allows also + skipping parent -> child -> parent chain preventing fast delete of + the child. + """ + if from_field and from_field.rel.on_delete is not CASCADE: + return False + if not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete')): + return False + model = objs.model + if (signals.pre_delete.has_listeners(model) + or signals.post_delete.has_listeners(model) + or signals.m2m_changed.has_listeners(model)): + return False + # The use of from_field comes from the need to avoid cascade back to + # parent when parent delete is cascading to child. + opts = model._meta + if any(link != from_field for link in opts.concrete_model._meta.parents.values()): + return False + # Foreign keys pointing to this model, both from m2m and other + # models. + for related in opts.get_all_related_objects( + include_hidden=True, include_proxy_eq=True): + if related.field.rel.on_delete is not DO_NOTHING: + return False + # GFK deletes + for relation in opts.many_to_many: + if not relation.rel.through: + return False + return True + + def collect(self, objs, source=None, nullable=False, collect_related=True, + source_attr=None, reverse_dependency=False): + """ + Adds 'objs' to the collection of objects to be deleted as well as all + parent instances. 'objs' must be a homogenous iterable collection of + model instances (e.g. a QuerySet). If 'collect_related' is True, + related objects will be handled by their respective on_delete handler. + + If the call is the result of a cascade, 'source' should be the model + that caused it and 'nullable' should be set to True, if the relation + can be null. + + If 'reverse_dependency' is True, 'source' will be deleted before the + current model, rather than after. (Needed for cascading to parent + models, the one case in which the cascade follows the forwards + direction of an FK rather than the reverse direction.) + """ + if self.can_fast_delete(objs): + self.fast_deletes.append(objs) + return + new_objs = self.add(objs, source, nullable, + reverse_dependency=reverse_dependency) + if not new_objs: + return + + model = new_objs[0].__class__ + + # Recursively collect concrete model's parent models, but not their + # related objects. These will be found by meta.get_all_related_objects() + concrete_model = model._meta.concrete_model + for ptr in six.itervalues(concrete_model._meta.parents): + if ptr: + # FIXME: This seems to be buggy and execute a query for each + # parent object fetch. We have the parent data in the obj, + # but we don't have a nice way to turn that data into parent + # object instance. 
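+            # i.e. getattr(obj, ptr.name) goes through the parent-link
+            # descriptor and issues one query per object, even though the
+            # parent's column data is already present on obj.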
+ parent_objs = [getattr(obj, ptr.name) for obj in new_objs] + self.collect(parent_objs, source=model, + source_attr=ptr.rel.related_name, + collect_related=False, + reverse_dependency=True) + + if collect_related: + for related in model._meta.get_all_related_objects( + include_hidden=True, include_proxy_eq=True): + field = related.field + if field.rel.on_delete == DO_NOTHING: + continue + sub_objs = self.related_objects(related, new_objs) + if self.can_fast_delete(sub_objs, from_field=field): + self.fast_deletes.append(sub_objs) + elif sub_objs: + field.rel.on_delete(self, field, sub_objs, self.using) + for field in model._meta.virtual_fields: + if hasattr(field, 'bulk_related_objects'): + # Its something like generic foreign key. + sub_objs = field.bulk_related_objects(new_objs, self.using) + self.collect(sub_objs, + source=model, + source_attr=field.rel.related_name, + nullable=True) + + def related_objects(self, related, objs): + """ + Gets a QuerySet of objects related to ``objs`` via the relation ``related``. + + """ + return related.model._base_manager.using(self.using).filter( + **{"%s__in" % related.field.name: objs} + ) + + def instances_with_model(self): + for model, instances in six.iteritems(self.data): + for obj in instances: + yield model, obj + + def sort(self): + sorted_models = [] + concrete_models = set() + models = list(self.data) + while len(sorted_models) < len(models): + found = False + for model in models: + if model in sorted_models: + continue + dependencies = self.dependencies.get(model._meta.concrete_model) + if not (dependencies and dependencies.difference(concrete_models)): + sorted_models.append(model) + concrete_models.add(model._meta.concrete_model) + found = True + if not found: + return + self.data = SortedDict([(model, self.data[model]) + for model in sorted_models]) + + def delete(self): + # sort instance collections + for model, instances in self.data.items(): + self.data[model] = sorted(instances, key=attrgetter("pk")) + + # if possible, bring the models in an order suitable for databases that + # don't support transactions or cannot defer constraint checks until the + # end of a transaction. 
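+        # e.g. (illustrative) if Book has a cascading ForeignKey to Author,
+        # Book is recorded as depending on Author, so sort() places Book's
+        # deletions before Author's.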
+ self.sort() + + with transaction.commit_on_success_unless_managed(using=self.using): + # send pre_delete signals + for model, obj in self.instances_with_model(): + if not model._meta.auto_created: + signals.pre_delete.send( + sender=model, instance=obj, using=self.using + ) + + # fast deletes + for qs in self.fast_deletes: + qs._raw_delete(using=self.using) + + # update fields + for model, instances_for_fieldvalues in six.iteritems(self.field_updates): + query = sql.UpdateQuery(model) + for (field, value), instances in six.iteritems(instances_for_fieldvalues): + query.update_batch([obj.pk for obj in instances], + {field.name: value}, self.using) + + # reverse instance collections + for instances in six.itervalues(self.data): + instances.reverse() + + # delete instances + for model, instances in six.iteritems(self.data): + query = sql.DeleteQuery(model) + pk_list = [obj.pk for obj in instances] + query.delete_batch(pk_list, self.using) + + if not model._meta.auto_created: + for obj in instances: + signals.post_delete.send( + sender=model, instance=obj, using=self.using + ) + + # update collected instances + for model, instances_for_fieldvalues in six.iteritems(self.field_updates): + for (field, value), instances in six.iteritems(instances_for_fieldvalues): + for obj in instances: + setattr(obj, field.attname, value) + for model, instances in six.iteritems(self.data): + for instance in instances: + setattr(instance, model._meta.pk.attname, None) diff --git a/lib/python2.7/site-packages/django/db/models/expressions.py b/lib/python2.7/site-packages/django/db/models/expressions.py new file mode 100644 index 0000000..6e0f3c4 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/expressions.py @@ -0,0 +1,186 @@ +import datetime + +from django.db.models.aggregates import refs_aggregate +from django.db.models.constants import LOOKUP_SEP +from django.utils import tree + +class ExpressionNode(tree.Node): + """ + Base class for all query expressions. + """ + # Arithmetic connectors + ADD = '+' + SUB = '-' + MUL = '*' + DIV = '/' + MOD = '%%' # This is a quoted % operator - it is quoted + # because it can be used in strings that also + # have parameter substitution. + + # Bitwise operators - note that these are generated by .bitand() + # and .bitor(), the '&' and '|' are reserved for boolean operator + # usage. 
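+    # e.g. F('flags').bitand(8) combines with the BITAND ('&') connector,
+    # whereas F('flags') & 8 raises NotImplementedError (see __and__ below).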
+ BITAND = '&' + BITOR = '|' + + def __init__(self, children=None, connector=None, negated=False): + if children is not None and len(children) > 1 and connector is None: + raise TypeError('You have to specify a connector.') + super(ExpressionNode, self).__init__(children, connector, negated) + + def _combine(self, other, connector, reversed, node=None): + if isinstance(other, datetime.timedelta): + return DateModifierNode([self, other], connector) + + if reversed: + obj = ExpressionNode([other], connector) + obj.add(node or self, connector) + else: + obj = node or ExpressionNode([self], connector) + obj.add(other, connector) + return obj + + def contains_aggregate(self, existing_aggregates): + if self.children: + return any(child.contains_aggregate(existing_aggregates) + for child in self.children + if hasattr(child, 'contains_aggregate')) + else: + return refs_aggregate(self.name.split(LOOKUP_SEP), + existing_aggregates) + + def prepare_database_save(self, unused): + return self + + ################### + # VISITOR METHODS # + ################### + + def prepare(self, evaluator, query, allow_joins): + return evaluator.prepare_node(self, query, allow_joins) + + def evaluate(self, evaluator, qn, connection): + return evaluator.evaluate_node(self, qn, connection) + + ############# + # OPERATORS # + ############# + + def __add__(self, other): + return self._combine(other, self.ADD, False) + + def __sub__(self, other): + return self._combine(other, self.SUB, False) + + def __mul__(self, other): + return self._combine(other, self.MUL, False) + + def __truediv__(self, other): + return self._combine(other, self.DIV, False) + + def __div__(self, other): # Python 2 compatibility + return type(self).__truediv__(self, other) + + def __mod__(self, other): + return self._combine(other, self.MOD, False) + + def __and__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + def bitand(self, other): + return self._combine(other, self.BITAND, False) + + def __or__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + def bitor(self, other): + return self._combine(other, self.BITOR, False) + + def __radd__(self, other): + return self._combine(other, self.ADD, True) + + def __rsub__(self, other): + return self._combine(other, self.SUB, True) + + def __rmul__(self, other): + return self._combine(other, self.MUL, True) + + def __rtruediv__(self, other): + return self._combine(other, self.DIV, True) + + def __rdiv__(self, other): # Python 2 compatibility + return type(self).__rtruediv__(self, other) + + def __rmod__(self, other): + return self._combine(other, self.MOD, True) + + def __rand__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + + def __ror__(self, other): + raise NotImplementedError( + "Use .bitand() and .bitor() for bitwise logical operations." + ) + +class F(ExpressionNode): + """ + An expression representing the value of the given field. 
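+
+    A typical use (illustrative) is updating a column from its current
+    database value without reading it into Python first:
+
+        Reporter.objects.filter(pk=pk).update(stories_filed=F('stories_filed') + 1)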
+ """ + def __init__(self, name): + super(F, self).__init__(None, None, False) + self.name = name + + def __deepcopy__(self, memodict): + obj = super(F, self).__deepcopy__(memodict) + obj.name = self.name + return obj + + def prepare(self, evaluator, query, allow_joins): + return evaluator.prepare_leaf(self, query, allow_joins) + + def evaluate(self, evaluator, qn, connection): + return evaluator.evaluate_leaf(self, qn, connection) + +class DateModifierNode(ExpressionNode): + """ + Node that implements the following syntax: + filter(end_date__gt=F('start_date') + datetime.timedelta(days=3, seconds=200)) + + which translates into: + POSTGRES: + WHERE end_date > (start_date + INTERVAL '3 days 200 seconds') + + MYSQL: + WHERE end_date > (start_date + INTERVAL '3 0:0:200:0' DAY_MICROSECOND) + + ORACLE: + WHERE end_date > (start_date + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)) + + SQLITE: + WHERE end_date > django_format_dtdelta(start_date, "+" "3", "200", "0") + (A custom function is used in order to preserve six digits of fractional + second information on sqlite, and to format both date and datetime values.) + + Note that microsecond comparisons are not well supported with MySQL, since + MySQL does not store microsecond information. + + Only adding and subtracting timedeltas is supported, attempts to use other + operations raise a TypeError. + """ + def __init__(self, children, connector, negated=False): + if len(children) != 2: + raise TypeError('Must specify a node and a timedelta.') + if not isinstance(children[1], datetime.timedelta): + raise TypeError('Second child must be a timedelta.') + if connector not in (self.ADD, self.SUB): + raise TypeError('Connector must be + or -, not %s' % connector) + super(DateModifierNode, self).__init__(children, connector, negated) + + def evaluate(self, evaluator, qn, connection): + return evaluator.evaluate_date_modifier_node(self, qn, connection) diff --git a/lib/python2.7/site-packages/django/db/models/fields/__init__.py b/lib/python2.7/site-packages/django/db/models/fields/__init__.py new file mode 100644 index 0000000..c10e2b1 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/fields/__init__.py @@ -0,0 +1,1438 @@ +from __future__ import unicode_literals + +import copy +import datetime +import decimal +import math +import warnings +from base64 import b64decode, b64encode +from itertools import tee + +from django.db import connection +from django.db.models.loading import get_model +from django.db.models.query_utils import QueryWrapper +from django.conf import settings +from django import forms +from django.core import exceptions, validators +from django.utils.datastructures import DictWrapper +from django.utils.dateparse import parse_date, parse_datetime, parse_time +from django.utils.functional import curry, total_ordering +from django.utils.itercompat import is_iterator +from django.utils.text import capfirst +from django.utils import timezone +from django.utils.translation import ugettext_lazy as _ +from django.utils.encoding import smart_text, force_text, force_bytes +from django.utils.ipv6 import clean_ipv6_address +from django.utils import six + +class Empty(object): + pass + +class NOT_PROVIDED: + pass + +# The values to use for "blank" in SelectFields. Will be appended to the start +# of most "choices" lists. 
+BLANK_CHOICE_DASH = [("", "---------")]
+
+def _load_field(app_label, model_name, field_name):
+    return get_model(app_label, model_name)._meta.get_field_by_name(field_name)[0]
+
+class FieldDoesNotExist(Exception):
+    pass
+
+# A guide to Field parameters:
+#
+#   * name:      The name of the field specified in the model.
+#   * attname:   The attribute to use on the model object. This is the same as
+#                "name", except in the case of ForeignKeys, where "_id" is
+#                appended.
+#   * db_column: The db_column specified in the model (or None).
+#   * column:    The database column for this field. This is the same as
+#                "attname", except if db_column is specified.
+#
+# Code that introspects values, or does other dynamic things, should use
+# attname. For example, this gets the primary key value of object "obj":
+#
+#     getattr(obj, opts.pk.attname)
+
+def _empty(of_cls):
+    new = Empty()
+    new.__class__ = of_cls
+    return new
+
+@total_ordering
+class Field(object):
+    """Base class for all field types"""
+
+    # Designates whether empty strings fundamentally are allowed at the
+    # database level.
+    empty_strings_allowed = True
+    empty_values = list(validators.EMPTY_VALUES)
+
+    # These track each time a Field instance is created. Used to retain order.
+    # The auto_creation_counter is used for fields that Django implicitly
+    # creates, creation_counter is used for all user-specified fields.
+    creation_counter = 0
+    auto_creation_counter = -1
+    default_validators = []  # Default set of validators
+    default_error_messages = {
+        'invalid_choice': _('Value %(value)r is not a valid choice.'),
+        'null': _('This field cannot be null.'),
+        'blank': _('This field cannot be blank.'),
+        'unique': _('%(model_name)s with this %(field_label)s '
+                    'already exists.'),
+    }
+
+    # Generic field type description, usually overridden by subclasses
+    def _description(self):
+        return _('Field of type: %(field_type)s') % {
+            'field_type': self.__class__.__name__
+        }
+    description = property(_description)
+
+    def __init__(self, verbose_name=None, name=None, primary_key=False,
+            max_length=None, unique=False, blank=False, null=False,
+            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
+            serialize=True, unique_for_date=None, unique_for_month=None,
+            unique_for_year=None, choices=None, help_text='', db_column=None,
+            db_tablespace=None, auto_created=False, validators=[],
+            error_messages=None):
+        self.name = name
+        self.verbose_name = verbose_name
+        self.primary_key = primary_key
+        self.max_length, self._unique = max_length, unique
+        self.blank, self.null = blank, null
+        self.rel = rel
+        self.default = default
+        self.editable = editable
+        self.serialize = serialize
+        self.unique_for_date, self.unique_for_month = (unique_for_date,
+                                                       unique_for_month)
+        self.unique_for_year = unique_for_year
+        self._choices = choices or []
+        self.help_text = help_text
+        self.db_column = db_column
+        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
+        self.auto_created = auto_created
+
+        # Set db_index to True if the field has a relationship and doesn't
+        # explicitly set db_index.
+        self.db_index = db_index
+
+        # Adjust the appropriate creation counter, and save our local copy.
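+        # (For example, two fields declared in class-body order receive
+        # counters 0 and 1, which __eq__/__lt__ below compare; implicitly
+        # created fields count down from -1 and so sort ahead of them.)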
+ if auto_created: + self.creation_counter = Field.auto_creation_counter + Field.auto_creation_counter -= 1 + else: + self.creation_counter = Field.creation_counter + Field.creation_counter += 1 + + self.validators = self.default_validators + validators + + messages = {} + for c in reversed(self.__class__.__mro__): + messages.update(getattr(c, 'default_error_messages', {})) + messages.update(error_messages or {}) + self.error_messages = messages + + def __eq__(self, other): + # Needed for @total_ordering + if isinstance(other, Field): + return self.creation_counter == other.creation_counter + return NotImplemented + + def __lt__(self, other): + # This is needed because bisect does not take a comparison function. + if isinstance(other, Field): + return self.creation_counter < other.creation_counter + return NotImplemented + + def __hash__(self): + return hash(self.creation_counter) + + def __deepcopy__(self, memodict): + # We don't have to deepcopy very much here, since most things are not + # intended to be altered after initial creation. + obj = copy.copy(self) + if self.rel: + obj.rel = copy.copy(self.rel) + if hasattr(self.rel, 'field') and self.rel.field is self: + obj.rel.field = obj + memodict[id(self)] = obj + return obj + + def __copy__(self): + # We need to avoid hitting __reduce__, so define this + # slightly weird copy construct. + obj = Empty() + obj.__class__ = self.__class__ + obj.__dict__ = self.__dict__.copy() + return obj + + def __reduce__(self): + """ + Pickling should return the model._meta.fields instance of the field, + not a new copy of that field. So, we use the app cache to load the + model and then the field back. + """ + if not hasattr(self, 'model'): + # Fields are sometimes used without attaching them to models (for + # example in aggregation). In this case give back a plain field + # instance. The code below will create a new empty instance of + # class self.__class__, then update its dict with self.__dict__ + # values - so, this is very close to normal pickle. + return _empty, (self.__class__,), self.__dict__ + if self.model._deferred: + # Deferred model will not be found from the app cache. This could + # be fixed by reconstructing the deferred model on unpickle. + raise RuntimeError("Fields of deferred models can't be reduced") + return _load_field, (self.model._meta.app_label, self.model._meta.object_name, + self.name) + + def to_python(self, value): + """ + Converts the input value into the expected Python data type, raising + django.core.exceptions.ValidationError if the data can't be converted. + Returns the converted value. Subclasses should override this. + """ + return value + + def run_validators(self, value): + if value in self.empty_values: + return + + errors = [] + for v in self.validators: + try: + v(value) + except exceptions.ValidationError as e: + if hasattr(e, 'code') and e.code in self.error_messages: + e.message = self.error_messages[e.code] + errors.extend(e.error_list) + + if errors: + raise exceptions.ValidationError(errors) + + def validate(self, value, model_instance): + """ + Validates value and throws ValidationError. Subclasses should override + this to provide validation logic. + """ + if not self.editable: + # Skip validation for non-editable fields. + return + + if self._choices and value not in self.empty_values: + for option_key, option_value in self.choices: + if isinstance(option_value, (list, tuple)): + # This is an optgroup, so look inside the group for + # options. 
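+                    # (For example, an optgroup entry has the shape
+                    # ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))).)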
+ for optgroup_key, optgroup_value in option_value: + if value == optgroup_key: + return + elif value == option_key: + return + raise exceptions.ValidationError( + self.error_messages['invalid_choice'], + code='invalid_choice', + params={'value': value}, + ) + + if value is None and not self.null: + raise exceptions.ValidationError(self.error_messages['null'], code='null') + + if not self.blank and value in self.empty_values: + raise exceptions.ValidationError(self.error_messages['blank'], code='blank') + + def clean(self, value, model_instance): + """ + Convert the value's type and run validation. Validation errors + from to_python and validate are propagated. The correct value is + returned if no error is raised. + """ + value = self.to_python(value) + self.validate(value, model_instance) + self.run_validators(value) + return value + + def db_type(self, connection): + """ + Returns the database column data type for this field, for the provided + connection. + """ + # The default implementation of this method looks at the + # backend-specific DATA_TYPES dictionary, looking up the field by its + # "internal type". + # + # A Field class can implement the get_internal_type() method to specify + # which *preexisting* Django Field class it's most similar to -- i.e., + # a custom field might be represented by a TEXT column type, which is + # the same as the TextField Django field type, which means the custom + # field's get_internal_type() returns 'TextField'. + # + # But the limitation of the get_internal_type() / data_types approach + # is that it cannot handle database column types that aren't already + # mapped to one of the built-in Django field types. In this case, you + # can implement db_type() instead of get_internal_type() to specify + # exactly which wacky database column type you want to use. + data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_") + try: + return (connection.creation.data_types[self.get_internal_type()] + % data) + except KeyError: + return None + + @property + def unique(self): + return self._unique or self.primary_key + + def set_attributes_from_name(self, name): + if not self.name: + self.name = name + self.attname, self.column = self.get_attname_column() + if self.verbose_name is None and self.name: + self.verbose_name = self.name.replace('_', ' ') + + def contribute_to_class(self, cls, name, virtual_only=False): + self.set_attributes_from_name(name) + self.model = cls + if virtual_only: + cls._meta.add_virtual_field(self) + else: + cls._meta.add_field(self) + if self.choices: + setattr(cls, 'get_%s_display' % self.name, + curry(cls._get_FIELD_display, field=self)) + + def get_attname(self): + return self.name + + def get_attname_column(self): + attname = self.get_attname() + column = self.db_column or attname + return attname, column + + def get_cache_name(self): + return '_%s_cache' % self.name + + def get_internal_type(self): + return self.__class__.__name__ + + def pre_save(self, model_instance, add): + """ + Returns field's value just before saving. + """ + return getattr(model_instance, self.attname) + + def get_prep_value(self, value): + """ + Perform preliminary non-db specific value checks and conversions. + """ + return value + + def get_db_prep_value(self, value, connection, prepared=False): + """Returns field's value prepared for interacting with the database + backend. 
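+
+        (Subclasses typically override this for backend-specific
+        conversions; DateField below, for example, routes values through
+        connection.ops.value_to_db_date.)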
+
+        Used by the default implementations of ``get_db_prep_save`` and
+        ``get_db_prep_lookup``.
+        """
+        if not prepared:
+            value = self.get_prep_value(value)
+        return value
+
+    def get_db_prep_save(self, value, connection):
+        """
+        Returns field's value prepared for saving into a database.
+        """
+        return self.get_db_prep_value(value, connection=connection,
+                                      prepared=False)
+
+    def get_prep_lookup(self, lookup_type, value):
+        """
+        Perform preliminary non-db specific lookup checks and conversions.
+        """
+        if hasattr(value, 'prepare'):
+            return value.prepare()
+        if hasattr(value, '_prepare'):
+            return value._prepare()
+
+        if lookup_type in (
+                'iexact', 'contains', 'icontains',
+                'startswith', 'istartswith', 'endswith', 'iendswith',
+                'month', 'day', 'week_day', 'hour', 'minute', 'second',
+                'isnull', 'search', 'regex', 'iregex',
+                ):
+            return value
+        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
+            return self.get_prep_value(value)
+        elif lookup_type in ('range', 'in'):
+            return [self.get_prep_value(v) for v in value]
+        elif lookup_type == 'year':
+            try:
+                return int(value)
+            except ValueError:
+                raise ValueError("The __year lookup type requires an integer "
+                                 "argument")
+
+        raise TypeError("Field has invalid lookup: %s" % lookup_type)
+
+    def get_db_prep_lookup(self, lookup_type, value, connection,
+                           prepared=False):
+        """
+        Returns field's value prepared for database lookup.
+        """
+        if not prepared:
+            value = self.get_prep_lookup(lookup_type, value)
+            prepared = True
+        if hasattr(value, 'get_compiler'):
+            value = value.get_compiler(connection=connection)
+        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
+            # If the value has a relabeled_clone method it means the
+            # value will be handled later on.
+            if hasattr(value, 'relabeled_clone'):
+                return value
+            if hasattr(value, 'as_sql'):
+                sql, params = value.as_sql()
+            else:
+                sql, params = value._as_sql(connection=connection)
+            return QueryWrapper(('(%s)' % sql), params)
+
+        if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute',
+                           'second', 'search', 'regex', 'iregex'):
+            return [value]
+        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
+            return [self.get_db_prep_value(value, connection=connection,
+                                           prepared=prepared)]
+        elif lookup_type in ('range', 'in'):
+            return [self.get_db_prep_value(v, connection=connection,
+                                           prepared=prepared) for v in value]
+        elif lookup_type in ('contains', 'icontains'):
+            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
+        elif lookup_type == 'iexact':
+            return [connection.ops.prep_for_iexact_query(value)]
+        elif lookup_type in ('startswith', 'istartswith'):
+            return ["%s%%" % connection.ops.prep_for_like_query(value)]
+        elif lookup_type in ('endswith', 'iendswith'):
+            return ["%%%s" % connection.ops.prep_for_like_query(value)]
+        elif lookup_type == 'isnull':
+            return []
+        elif lookup_type == 'year':
+            if isinstance(self, DateTimeField):
+                return connection.ops.year_lookup_bounds_for_datetime_field(value)
+            elif isinstance(self, DateField):
+                return connection.ops.year_lookup_bounds_for_date_field(value)
+            else:
+                return [value]  # this isn't supposed to happen
+
+    def has_default(self):
+        """
+        Returns a boolean of whether this field has a default value.
+        """
+        return self.default is not NOT_PROVIDED
+
+    def get_default(self):
+        """
+        Returns the default value for this field.
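+
+        Note that a callable default is re-evaluated here on every call;
+        e.g. ``default=datetime.date.today`` produces a fresh date per
+        instance, while ``default=datetime.date.today()`` is computed once
+        at class-definition time.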
+ """ + if self.has_default(): + if callable(self.default): + return self.default() + return force_text(self.default, strings_only=True) + if (not self.empty_strings_allowed or (self.null and + not connection.features.interprets_empty_strings_as_nulls)): + return None + return "" + + def get_validator_unique_lookup_type(self): + return '%s__exact' % self.name + + def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH): + """Returns choices with a default blank choices included, for use + as SelectField choices for this field.""" + first_choice = blank_choice if include_blank else [] + if self.choices: + return first_choice + list(self.choices) + rel_model = self.rel.to + if hasattr(self.rel, 'get_related_field'): + lst = [(getattr(x, self.rel.get_related_field().attname), + smart_text(x)) + for x in rel_model._default_manager.complex_filter( + self.rel.limit_choices_to)] + else: + lst = [(x._get_pk_val(), smart_text(x)) + for x in rel_model._default_manager.complex_filter( + self.rel.limit_choices_to)] + return first_choice + lst + + def get_choices_default(self): + return self.get_choices() + + def get_flatchoices(self, include_blank=True, + blank_choice=BLANK_CHOICE_DASH): + """ + Returns flattened choices with a default blank choice included. + """ + first_choice = blank_choice if include_blank else [] + return first_choice + list(self.flatchoices) + + def _get_val_from_obj(self, obj): + if obj is not None: + return getattr(obj, self.attname) + else: + return self.get_default() + + def value_to_string(self, obj): + """ + Returns a string value of this field from the passed obj. + This is used by the serialization framework. + """ + return smart_text(self._get_val_from_obj(obj)) + + def bind(self, fieldmapping, original, bound_field_class): + return bound_field_class(self, fieldmapping, original) + + def _get_choices(self): + if is_iterator(self._choices): + choices, self._choices = tee(self._choices) + return choices + else: + return self._choices + choices = property(_get_choices) + + def _get_flatchoices(self): + """Flattened version of choices tuple.""" + flat = [] + for choice, value in self.choices: + if isinstance(value, (list, tuple)): + flat.extend(value) + else: + flat.append((choice,value)) + return flat + flatchoices = property(_get_flatchoices) + + def save_form_data(self, instance, data): + setattr(instance, self.name, data) + + def formfield(self, form_class=None, choices_form_class=None, **kwargs): + """ + Returns a django.forms.Field instance for this database Field. + """ + defaults = {'required': not self.blank, + 'label': capfirst(self.verbose_name), + 'help_text': self.help_text} + if self.has_default(): + if callable(self.default): + defaults['initial'] = self.default + defaults['show_hidden_initial'] = True + else: + defaults['initial'] = self.get_default() + if self.choices: + # Fields with choices get special treatment. + include_blank = (self.blank or + not (self.has_default() or 'initial' in kwargs)) + defaults['choices'] = self.get_choices(include_blank=include_blank) + defaults['coerce'] = self.to_python + if self.null: + defaults['empty_value'] = None + if choices_form_class is not None: + form_class = choices_form_class + else: + form_class = forms.TypedChoiceField + # Many of the subclass-specific formfield arguments (min_value, + # max_value) don't apply for choice fields, so be sure to only pass + # the values that TypedChoiceField will understand. 
+            for k in list(kwargs):
+                if k not in ('coerce', 'empty_value', 'choices', 'required',
+                             'widget', 'label', 'initial', 'help_text',
+                             'error_messages', 'show_hidden_initial'):
+                    del kwargs[k]
+        defaults.update(kwargs)
+        if form_class is None:
+            form_class = forms.CharField
+        return form_class(**defaults)
+
+    def value_from_object(self, obj):
+        """
+        Returns the value of this field in the given model instance.
+        """
+        return getattr(obj, self.attname)
+
+    def __repr__(self):
+        """
+        Displays the module, class and name of the field.
+        """
+        path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
+        name = getattr(self, 'name', None)
+        if name is not None:
+            return '<%s: %s>' % (path, name)
+        return '<%s>' % path
+
+class AutoField(Field):
+    description = _("Integer")
+
+    empty_strings_allowed = False
+    default_error_messages = {
+        'invalid': _("'%(value)s' value must be an integer."),
+    }
+
+    def __init__(self, *args, **kwargs):
+        assert kwargs.get('primary_key', False) is True, \
+            "%ss must have primary_key=True." % self.__class__.__name__
+        kwargs['blank'] = True
+        Field.__init__(self, *args, **kwargs)
+
+    def get_internal_type(self):
+        return "AutoField"
+
+    def to_python(self, value):
+        if value is None:
+            return value
+        try:
+            return int(value)
+        except (TypeError, ValueError):
+            raise exceptions.ValidationError(
+                self.error_messages['invalid'],
+                code='invalid',
+                params={'value': value},
+            )
+
+    def validate(self, value, model_instance):
+        pass
+
+    def get_db_prep_value(self, value, connection, prepared=False):
+        if not prepared:
+            value = self.get_prep_value(value)
+            value = connection.ops.validate_autopk_value(value)
+        return value
+
+    def get_prep_value(self, value):
+        if value is None:
+            return None
+        return int(value)
+
+    def contribute_to_class(self, cls, name):
+        assert not cls._meta.has_auto_field, \
+            "A model can't have more than one AutoField."
+        super(AutoField, self).contribute_to_class(cls, name)
+        cls._meta.has_auto_field = True
+        cls._meta.auto_field = self
+
+    def formfield(self, **kwargs):
+        return None
+
+class BooleanField(Field):
+    empty_strings_allowed = False
+    default_error_messages = {
+        'invalid': _("'%(value)s' value must be either True or False."),
+    }
+    description = _("Boolean (Either True or False)")
+
+    def __init__(self, *args, **kwargs):
+        kwargs['blank'] = True
+        Field.__init__(self, *args, **kwargs)
+
+    def get_internal_type(self):
+        return "BooleanField"
+
+    def to_python(self, value):
+        if value in (True, False):
+            # if value is 1 or 0 then it's equal to True or False, but we want
+            # to return a true bool for semantic reasons.
+            return bool(value)
+        if value in ('t', 'True', '1'):
+            return True
+        if value in ('f', 'False', '0'):
+            return False
+        raise exceptions.ValidationError(
+            self.error_messages['invalid'],
+            code='invalid',
+            params={'value': value},
+        )
+
+    def get_prep_lookup(self, lookup_type, value):
+        # Special-case handling for filters coming from a Web request (e.g. the
+        # admin interface). Only works for scalar values (not lists). If you're
+        # passing in a list, you might as well make things the right type when
+        # constructing the list.
+        if value in ('1', '0'):
+            value = bool(int(value))
+        return super(BooleanField, self).get_prep_lookup(lookup_type, value)
+
+    def get_prep_value(self, value):
+        if value is None:
+            return None
+        return bool(value)
+
+    def formfield(self, **kwargs):
+        # Unlike most fields, BooleanField figures out include_blank from
+        # self.null instead of self.blank.
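+        # (__init__ above forces blank=True, so self.null is the only
+        # remaining signal that an empty choice is meaningful.)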
+ if self.choices: + include_blank = (self.null or + not (self.has_default() or 'initial' in kwargs)) + defaults = {'choices': self.get_choices( + include_blank=include_blank)} + else: + defaults = {'form_class': forms.BooleanField} + defaults.update(kwargs) + return super(BooleanField, self).formfield(**defaults) + +class CharField(Field): + description = _("String (up to %(max_length)s)") + + def __init__(self, *args, **kwargs): + super(CharField, self).__init__(*args, **kwargs) + self.validators.append(validators.MaxLengthValidator(self.max_length)) + + def get_internal_type(self): + return "CharField" + + def to_python(self, value): + if isinstance(value, six.string_types) or value is None: + return value + return smart_text(value) + + def get_prep_value(self, value): + return self.to_python(value) + + def formfield(self, **kwargs): + # Passing max_length to forms.CharField means that the value's length + # will be validated twice. This is considered acceptable since we want + # the value in the form field (to pass into widget for example). + defaults = {'max_length': self.max_length} + defaults.update(kwargs) + return super(CharField, self).formfield(**defaults) + +# TODO: Maybe move this into contrib, because it's specialized. +class CommaSeparatedIntegerField(CharField): + default_validators = [validators.validate_comma_separated_integer_list] + description = _("Comma-separated integers") + + def formfield(self, **kwargs): + defaults = { + 'error_messages': { + 'invalid': _('Enter only digits separated by commas.'), + } + } + defaults.update(kwargs) + return super(CommaSeparatedIntegerField, self).formfield(**defaults) + +class DateField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value has an invalid date format. It must be " + "in YYYY-MM-DD format."), + 'invalid_date': _("'%(value)s' value has the correct format (YYYY-MM-DD) " + "but it is an invalid date."), + } + description = _("Date (without time)") + + def __init__(self, verbose_name=None, name=None, auto_now=False, + auto_now_add=False, **kwargs): + self.auto_now, self.auto_now_add = auto_now, auto_now_add + if auto_now or auto_now_add: + kwargs['editable'] = False + kwargs['blank'] = True + Field.__init__(self, verbose_name, name, **kwargs) + + def get_internal_type(self): + return "DateField" + + def to_python(self, value): + if value is None: + return value + if isinstance(value, datetime.datetime): + if settings.USE_TZ and timezone.is_aware(value): + # Convert aware datetimes to the default time zone + # before casting them to dates (#17742). 
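+                # (For instance, 2011-12-31 23:30 in UTC-5 is already
+                # 2012-01-01 04:30 in UTC, so the resulting date depends on
+                # the zone the value is converted to first.)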
+ default_timezone = timezone.get_default_timezone() + value = timezone.make_naive(value, default_timezone) + return value.date() + if isinstance(value, datetime.date): + return value + + try: + parsed = parse_date(value) + if parsed is not None: + return parsed + except ValueError: + raise exceptions.ValidationError( + self.error_messages['invalid_date'], + code='invalid_date', + params={'value': value}, + ) + + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def pre_save(self, model_instance, add): + if self.auto_now or (self.auto_now_add and add): + value = datetime.date.today() + setattr(model_instance, self.attname, value) + return value + else: + return super(DateField, self).pre_save(model_instance, add) + + def contribute_to_class(self, cls, name): + super(DateField,self).contribute_to_class(cls, name) + if not self.null: + setattr(cls, 'get_next_by_%s' % self.name, + curry(cls._get_next_or_previous_by_FIELD, field=self, + is_next=True)) + setattr(cls, 'get_previous_by_%s' % self.name, + curry(cls._get_next_or_previous_by_FIELD, field=self, + is_next=False)) + + def get_prep_lookup(self, lookup_type, value): + # For dates lookups, convert the value to an int + # so the database backend always sees a consistent type. + if lookup_type in ('month', 'day', 'week_day', 'hour', 'minute', 'second'): + return int(value) + return super(DateField, self).get_prep_lookup(lookup_type, value) + + def get_prep_value(self, value): + return self.to_python(value) + + def get_db_prep_value(self, value, connection, prepared=False): + # Casts dates into the format expected by the backend + if not prepared: + value = self.get_prep_value(value) + return connection.ops.value_to_db_date(value) + + def value_to_string(self, obj): + val = self._get_val_from_obj(obj) + return '' if val is None else val.isoformat() + + def formfield(self, **kwargs): + defaults = {'form_class': forms.DateField} + defaults.update(kwargs) + return super(DateField, self).formfield(**defaults) + +class DateTimeField(DateField): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value has an invalid format. It must be in " + "YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."), + 'invalid_date': _("'%(value)s' value has the correct format " + "(YYYY-MM-DD) but it is an invalid date."), + 'invalid_datetime': _("'%(value)s' value has the correct format " + "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) " + "but it is an invalid date/time."), + } + description = _("Date (with time)") + + # __init__ is inherited from DateField + + def get_internal_type(self): + return "DateTimeField" + + def to_python(self, value): + if value is None: + return value + if isinstance(value, datetime.datetime): + return value + if isinstance(value, datetime.date): + value = datetime.datetime(value.year, value.month, value.day) + if settings.USE_TZ: + # For backwards compatibility, interpret naive datetimes in + # local time. This won't work during DST change, but we can't + # do much about it, so we let the exceptions percolate up the + # call stack. + warnings.warn("DateTimeField %s.%s received a naive datetime " + "(%s) while time zone support is active." 
% + (self.model.__name__, self.name, value), + RuntimeWarning) + default_timezone = timezone.get_default_timezone() + value = timezone.make_aware(value, default_timezone) + return value + + try: + parsed = parse_datetime(value) + if parsed is not None: + return parsed + except ValueError: + raise exceptions.ValidationError( + self.error_messages['invalid_datetime'], + code='invalid_datetime', + params={'value': value}, + ) + + try: + parsed = parse_date(value) + if parsed is not None: + return datetime.datetime(parsed.year, parsed.month, parsed.day) + except ValueError: + raise exceptions.ValidationError( + self.error_messages['invalid_date'], + code='invalid_date', + params={'value': value}, + ) + + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def pre_save(self, model_instance, add): + if self.auto_now or (self.auto_now_add and add): + value = timezone.now() + setattr(model_instance, self.attname, value) + return value + else: + return super(DateTimeField, self).pre_save(model_instance, add) + + # contribute_to_class is inherited from DateField, it registers + # get_next_by_FOO and get_prev_by_FOO + + # get_prep_lookup is inherited from DateField + + def get_prep_value(self, value): + value = self.to_python(value) + if value is not None and settings.USE_TZ and timezone.is_naive(value): + # For backwards compatibility, interpret naive datetimes in local + # time. This won't work during DST change, but we can't do much + # about it, so we let the exceptions percolate up the call stack. + warnings.warn("DateTimeField %s.%s received a naive datetime (%s)" + " while time zone support is active." % + (self.model.__name__, self.name, value), + RuntimeWarning) + default_timezone = timezone.get_default_timezone() + value = timezone.make_aware(value, default_timezone) + return value + + def get_db_prep_value(self, value, connection, prepared=False): + # Casts datetimes into the format expected by the backend + if not prepared: + value = self.get_prep_value(value) + return connection.ops.value_to_db_datetime(value) + + def value_to_string(self, obj): + val = self._get_val_from_obj(obj) + return '' if val is None else val.isoformat() + + def formfield(self, **kwargs): + defaults = {'form_class': forms.DateTimeField} + defaults.update(kwargs) + return super(DateTimeField, self).formfield(**defaults) + +class DecimalField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value must be a decimal number."), + } + description = _("Decimal number") + + def __init__(self, verbose_name=None, name=None, max_digits=None, + decimal_places=None, **kwargs): + self.max_digits, self.decimal_places = max_digits, decimal_places + Field.__init__(self, verbose_name, name, **kwargs) + + def get_internal_type(self): + return "DecimalField" + + def to_python(self, value): + if value is None: + return value + try: + return decimal.Decimal(value) + except decimal.InvalidOperation: + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def _format(self, value): + if isinstance(value, six.string_types) or value is None: + return value + else: + return self.format_number(value) + + def format_number(self, value): + """ + Formats a number into a string with the requisite number of digits and + decimal places. + """ + # Method moved to django.db.backends.util. 
+ # + # It is preserved because it is used by the oracle backend + # (django.db.backends.oracle.query), and also for + # backwards-compatibility with any external code which may have used + # this method. + from django.db.backends import util + return util.format_number(value, self.max_digits, self.decimal_places) + + def get_db_prep_save(self, value, connection): + return connection.ops.value_to_db_decimal(self.to_python(value), + self.max_digits, self.decimal_places) + + def get_prep_value(self, value): + return self.to_python(value) + + def formfield(self, **kwargs): + defaults = { + 'max_digits': self.max_digits, + 'decimal_places': self.decimal_places, + 'form_class': forms.DecimalField, + } + defaults.update(kwargs) + return super(DecimalField, self).formfield(**defaults) + +class EmailField(CharField): + default_validators = [validators.validate_email] + description = _("Email address") + + def __init__(self, *args, **kwargs): + # max_length should be overridden to 254 characters to be fully + # compliant with RFCs 3696 and 5321 + + kwargs['max_length'] = kwargs.get('max_length', 75) + CharField.__init__(self, *args, **kwargs) + + def formfield(self, **kwargs): + # As with CharField, this will cause email validation to be performed + # twice. + defaults = { + 'form_class': forms.EmailField, + } + defaults.update(kwargs) + return super(EmailField, self).formfield(**defaults) + +class FilePathField(Field): + description = _("File path") + + def __init__(self, verbose_name=None, name=None, path='', match=None, + recursive=False, allow_files=True, allow_folders=False, **kwargs): + self.path, self.match, self.recursive = path, match, recursive + self.allow_files, self.allow_folders = allow_files, allow_folders + kwargs['max_length'] = kwargs.get('max_length', 100) + Field.__init__(self, verbose_name, name, **kwargs) + + def get_prep_value(self, value): + value = super(FilePathField, self).get_prep_value(value) + if value is None: + return None + return six.text_type(value) + + def formfield(self, **kwargs): + defaults = { + 'path': self.path, + 'match': self.match, + 'recursive': self.recursive, + 'form_class': forms.FilePathField, + 'allow_files': self.allow_files, + 'allow_folders': self.allow_folders, + } + defaults.update(kwargs) + return super(FilePathField, self).formfield(**defaults) + + def get_internal_type(self): + return "FilePathField" + +class FloatField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value must be a float."), + } + description = _("Floating point number") + + def get_prep_value(self, value): + if value is None: + return None + return float(value) + + def get_internal_type(self): + return "FloatField" + + def to_python(self, value): + if value is None: + return value + try: + return float(value) + except (TypeError, ValueError): + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def formfield(self, **kwargs): + defaults = {'form_class': forms.FloatField} + defaults.update(kwargs) + return super(FloatField, self).formfield(**defaults) + +class IntegerField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value must be an integer."), + } + description = _("Integer") + + def get_prep_value(self, value): + if value is None: + return None + return int(value) + + def get_prep_lookup(self, lookup_type, value): + if ((lookup_type == 'gte' or lookup_type == 'lt') + and isinstance(value, float)): + 
value = math.ceil(value) + return super(IntegerField, self).get_prep_lookup(lookup_type, value) + + def get_internal_type(self): + return "IntegerField" + + def to_python(self, value): + if value is None: + return value + try: + return int(value) + except (TypeError, ValueError): + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def formfield(self, **kwargs): + defaults = {'form_class': forms.IntegerField} + defaults.update(kwargs) + return super(IntegerField, self).formfield(**defaults) + +class BigIntegerField(IntegerField): + empty_strings_allowed = False + description = _("Big (8 byte) integer") + MAX_BIGINT = 9223372036854775807 + + def get_internal_type(self): + return "BigIntegerField" + + def formfield(self, **kwargs): + defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1, + 'max_value': BigIntegerField.MAX_BIGINT} + defaults.update(kwargs) + return super(BigIntegerField, self).formfield(**defaults) + +class IPAddressField(Field): + empty_strings_allowed = False + description = _("IPv4 address") + + def __init__(self, *args, **kwargs): + kwargs['max_length'] = 15 + Field.__init__(self, *args, **kwargs) + + def get_prep_value(self, value): + value = super(IPAddressField, self).get_prep_value(value) + if value is None: + return None + return six.text_type(value) + + def get_internal_type(self): + return "IPAddressField" + + def formfield(self, **kwargs): + defaults = {'form_class': forms.IPAddressField} + defaults.update(kwargs) + return super(IPAddressField, self).formfield(**defaults) + +class GenericIPAddressField(Field): + empty_strings_allowed = True + description = _("IP address") + default_error_messages = {} + + def __init__(self, verbose_name=None, name=None, protocol='both', + unpack_ipv4=False, *args, **kwargs): + self.unpack_ipv4 = unpack_ipv4 + self.protocol = protocol + self.default_validators, invalid_error_message = \ + validators.ip_address_validators(protocol, unpack_ipv4) + self.default_error_messages['invalid'] = invalid_error_message + kwargs['max_length'] = 39 + Field.__init__(self, verbose_name, name, *args, **kwargs) + + def get_internal_type(self): + return "GenericIPAddressField" + + def to_python(self, value): + if value and ':' in value: + return clean_ipv6_address(value, + self.unpack_ipv4, self.error_messages['invalid']) + return value + + def get_db_prep_value(self, value, connection, prepared=False): + if not prepared: + value = self.get_prep_value(value) + return value or None + + def get_prep_value(self, value): + if value is None: + return value + if value and ':' in value: + try: + return clean_ipv6_address(value, self.unpack_ipv4) + except exceptions.ValidationError: + pass + return six.text_type(value) + + def formfield(self, **kwargs): + defaults = { + 'protocol': self.protocol, + 'form_class': forms.GenericIPAddressField, + } + defaults.update(kwargs) + return super(GenericIPAddressField, self).formfield(**defaults) + + +class NullBooleanField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value must be either None, True or False."), + } + description = _("Boolean (Either True, False or None)") + + def __init__(self, *args, **kwargs): + kwargs['null'] = True + kwargs['blank'] = True + Field.__init__(self, *args, **kwargs) + + def get_internal_type(self): + return "NullBooleanField" + + def to_python(self, value): + if value is None: + return None + if value in (True, False): + return bool(value) + if value in 
('None',): + return None + if value in ('t', 'True', '1'): + return True + if value in ('f', 'False', '0'): + return False + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def get_prep_lookup(self, lookup_type, value): + # Special-case handling for filters coming from a Web request (e.g. the + # admin interface). Only works for scalar values (not lists). If you're + # passing in a list, you might as well make things the right type when + # constructing the list. + if value in ('1', '0'): + value = bool(int(value)) + return super(NullBooleanField, self).get_prep_lookup(lookup_type, + value) + + def get_prep_value(self, value): + if value is None: + return None + return bool(value) + + def formfield(self, **kwargs): + defaults = { + 'form_class': forms.NullBooleanField, + 'required': not self.blank, + 'label': capfirst(self.verbose_name), + 'help_text': self.help_text} + defaults.update(kwargs) + return super(NullBooleanField, self).formfield(**defaults) + +class PositiveIntegerField(IntegerField): + description = _("Positive integer") + + def get_internal_type(self): + return "PositiveIntegerField" + + def formfield(self, **kwargs): + defaults = {'min_value': 0} + defaults.update(kwargs) + return super(PositiveIntegerField, self).formfield(**defaults) + +class PositiveSmallIntegerField(IntegerField): + description = _("Positive small integer") + + def get_internal_type(self): + return "PositiveSmallIntegerField" + + def formfield(self, **kwargs): + defaults = {'min_value': 0} + defaults.update(kwargs) + return super(PositiveSmallIntegerField, self).formfield(**defaults) + +class SlugField(CharField): + default_validators = [validators.validate_slug] + description = _("Slug (up to %(max_length)s)") + + def __init__(self, *args, **kwargs): + kwargs['max_length'] = kwargs.get('max_length', 50) + # Set db_index=True unless it's been set manually. + if 'db_index' not in kwargs: + kwargs['db_index'] = True + super(SlugField, self).__init__(*args, **kwargs) + + def get_internal_type(self): + return "SlugField" + + def formfield(self, **kwargs): + defaults = {'form_class': forms.SlugField} + defaults.update(kwargs) + return super(SlugField, self).formfield(**defaults) + +class SmallIntegerField(IntegerField): + description = _("Small integer") + + def get_internal_type(self): + return "SmallIntegerField" + +class TextField(Field): + description = _("Text") + + def get_internal_type(self): + return "TextField" + + def get_prep_value(self, value): + if isinstance(value, six.string_types) or value is None: + return value + return smart_text(value) + + def formfield(self, **kwargs): + defaults = {'widget': forms.Textarea} + defaults.update(kwargs) + return super(TextField, self).formfield(**defaults) + +class TimeField(Field): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _("'%(value)s' value has an invalid format. 
It must be in " + "HH:MM[:ss[.uuuuuu]] format."), + 'invalid_time': _("'%(value)s' value has the correct format " + "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."), + } + description = _("Time") + + def __init__(self, verbose_name=None, name=None, auto_now=False, + auto_now_add=False, **kwargs): + self.auto_now, self.auto_now_add = auto_now, auto_now_add + if auto_now or auto_now_add: + kwargs['editable'] = False + kwargs['blank'] = True + Field.__init__(self, verbose_name, name, **kwargs) + + def get_internal_type(self): + return "TimeField" + + def to_python(self, value): + if value is None: + return None + if isinstance(value, datetime.time): + return value + if isinstance(value, datetime.datetime): + # Not usually a good idea to pass in a datetime here (it loses + # information), but this can be a side-effect of interacting with a + # database backend (e.g. Oracle), so we'll be accommodating. + return value.time() + + try: + parsed = parse_time(value) + if parsed is not None: + return parsed + except ValueError: + raise exceptions.ValidationError( + self.error_messages['invalid_time'], + code='invalid_time', + params={'value': value}, + ) + + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'value': value}, + ) + + def pre_save(self, model_instance, add): + if self.auto_now or (self.auto_now_add and add): + value = datetime.datetime.now().time() + setattr(model_instance, self.attname, value) + return value + else: + return super(TimeField, self).pre_save(model_instance, add) + + def get_prep_value(self, value): + return self.to_python(value) + + def get_db_prep_value(self, value, connection, prepared=False): + # Casts times into the format expected by the backend + if not prepared: + value = self.get_prep_value(value) + return connection.ops.value_to_db_time(value) + + def value_to_string(self, obj): + val = self._get_val_from_obj(obj) + return '' if val is None else val.isoformat() + + def formfield(self, **kwargs): + defaults = {'form_class': forms.TimeField} + defaults.update(kwargs) + return super(TimeField, self).formfield(**defaults) + +class URLField(CharField): + default_validators = [validators.URLValidator()] + description = _("URL") + + def __init__(self, verbose_name=None, name=None, **kwargs): + kwargs['max_length'] = kwargs.get('max_length', 200) + CharField.__init__(self, verbose_name, name, **kwargs) + + def formfield(self, **kwargs): + # As with CharField, this will cause URL validation to be performed + # twice. 
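+        # (Once via this field's URLValidator in default_validators, and
+        # once by forms.URLField's own validation.)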
+        defaults = {
+            'form_class': forms.URLField,
+        }
+        defaults.update(kwargs)
+        return super(URLField, self).formfield(**defaults)
+
+class BinaryField(Field):
+    description = _("Raw binary data")
+    empty_values = [None, b'']
+
+    def __init__(self, *args, **kwargs):
+        kwargs['editable'] = False
+        super(BinaryField, self).__init__(*args, **kwargs)
+        if self.max_length is not None:
+            self.validators.append(validators.MaxLengthValidator(self.max_length))
+
+    def get_internal_type(self):
+        return "BinaryField"
+
+    def get_default(self):
+        if self.has_default() and not callable(self.default):
+            return self.default
+        default = super(BinaryField, self).get_default()
+        if default == '':
+            return b''
+        return default
+
+    def get_db_prep_value(self, value, connection, prepared=False):
+        value = super(BinaryField, self
+            ).get_db_prep_value(value, connection, prepared)
+        if value is not None:
+            return connection.Database.Binary(value)
+        return value
+
+    def value_to_string(self, obj):
+        """Binary data is serialized as base64"""
+        return b64encode(force_bytes(self._get_val_from_obj(obj))).decode('ascii')
+
+    def to_python(self, value):
+        # If it's a string, it should be base64-encoded data
+        if isinstance(value, six.text_type):
+            return six.memoryview(b64decode(force_bytes(value)))
+        return value
diff --git a/lib/python2.7/site-packages/django/db/models/fields/files.py b/lib/python2.7/site-packages/django/db/models/fields/files.py
new file mode 100644
index 0000000..3b3c1ec
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/fields/files.py
@@ -0,0 +1,397 @@
+import datetime
+import os
+
+from django import forms
+from django.db.models.fields import Field
+from django.core.files.base import File
+from django.core.files.storage import default_storage
+from django.core.files.images import ImageFile
+from django.db.models import signals
+from django.utils.encoding import force_str, force_text
+from django.utils import six
+from django.utils.translation import ugettext_lazy as _
+
+class FieldFile(File):
+    def __init__(self, instance, field, name):
+        super(FieldFile, self).__init__(None, name)
+        self.instance = instance
+        self.field = field
+        self.storage = field.storage
+        self._committed = True
+
+    def __eq__(self, other):
+        # Older code may be expecting FileField values to be simple strings.
+        # By overriding the == operator, this class can remain backwards
+        # compatible.
+        if hasattr(other, 'name'):
+            return self.name == other.name
+        return self.name == other
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __hash__(self):
+        return hash(self.name)
+
+    # The standard File contains most of the necessary properties, but
+    # FieldFiles can be instantiated without a name, so that needs to
+    # be checked for here.
+
+    def _require_file(self):
+        if not self:
+            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
+
+    def _get_file(self):
+        self._require_file()
+        if not hasattr(self, '_file') or self._file is None:
+            self._file = self.storage.open(self.name, 'rb')
+        return self._file
+
+    def _set_file(self, file):
+        self._file = file
+
+    def _del_file(self):
+        del self._file
+
+    file = property(_get_file, _set_file, _del_file)
+
+    def _get_path(self):
+        self._require_file()
+        return self.storage.path(self.name)
+    path = property(_get_path)
+
+    def _get_url(self):
+        self._require_file()
+        return self.storage.url(self.name)
+    url = property(_get_url)
+
+    def _get_size(self):
+        self._require_file()
+        if not self._committed:
+            return self.file.size
+        return self.storage.size(self.name)
+    size = property(_get_size)
+
+    def open(self, mode='rb'):
+        self._require_file()
+        self.file.open(mode)
+    # open() doesn't alter the file's contents, but it does reset the pointer
+    open.alters_data = True
+
+    # In addition to the standard File API, FieldFiles have extra methods
+    # to further manipulate the underlying file, as well as update the
+    # associated model instance.
+
+    def save(self, name, content, save=True):
+        name = self.field.generate_filename(self.instance, name)
+        self.name = self.storage.save(name, content)
+        setattr(self.instance, self.field.name, self.name)
+
+        # Update the filesize cache
+        self._size = content.size
+        self._committed = True
+
+        # Save the object because it has changed, unless save is False
+        if save:
+            self.instance.save()
+    save.alters_data = True
+
+    def delete(self, save=True):
+        if not self:
+            return
+        # Only close the file if it's already open, which we know by the
+        # presence of self._file
+        if hasattr(self, '_file'):
+            self.close()
+            del self.file
+
+        self.storage.delete(self.name)
+
+        self.name = None
+        setattr(self.instance, self.field.name, self.name)
+
+        # Delete the filesize cache
+        if hasattr(self, '_size'):
+            del self._size
+        self._committed = False
+
+        if save:
+            self.instance.save()
+    delete.alters_data = True
+
+    def _get_closed(self):
+        file = getattr(self, '_file', None)
+        return file is None or file.closed
+    closed = property(_get_closed)
+
+    def close(self):
+        file = getattr(self, '_file', None)
+        if file is not None:
+            file.close()
+
+    def __getstate__(self):
+        # FieldFile needs access to its associated model field and an instance
+        # it's attached to in order to work properly, but the only necessary
+        # data to be pickled is the file's name itself. Everything else will
+        # be restored later, by FileDescriptor below.
+        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
+
+class FileDescriptor(object):
+    """
+    The descriptor for the file attribute on the model instance. Returns a
+    FieldFile when accessed so you can do stuff like::
+
+        >>> instance.file.size
+
+    Assigns a file object on assignment so you can do::
+
+        >>> instance.file = File(...)
+
+    """
+    def __init__(self, field):
+        self.field = field
+
+    def __get__(self, instance=None, owner=None):
+        if instance is None:
+            raise AttributeError(
+                "The '%s' attribute can only be accessed from %s instances."
+                % (self.field.name, owner.__name__))
+
+        # This is slightly complicated, so worth an explanation.
+        # `instance.file` needs to ultimately return some instance of `File`,
+        # probably a subclass. Additionally, this returned object needs to have
+        # the FieldFile API so that users can easily do things like
+        # instance.file.path and have that delegated to the file storage engine.
+        # Easy enough if we're strict about assignment in __set__, but if you
+        # peek below you can see that we're not. So depending on the current
+        # value of the field we have to dynamically construct some sort of
+        # "thing" to return.
+
+        # The instance dict contains whatever was originally assigned
+        # in __set__.
+        file = instance.__dict__[self.field.name]
+
+        # If this value is a string (instance.file = "path/to/file") or None
+        # then we simply wrap it with the appropriate attribute class according
+        # to the file field. [This is FieldFile for FileFields and
+        # ImageFieldFile for ImageFields; it's also conceivable that user
+        # subclasses might also want to subclass the attribute class]. This
+        # object understands how to convert a path to a file, and also how to
+        # handle None.
+        if isinstance(file, six.string_types) or file is None:
+            attr = self.field.attr_class(instance, self.field, file)
+            instance.__dict__[self.field.name] = attr
+
+        # Other types of files may be assigned as well, but they need to have
+        # the FieldFile interface added to them. Thus, we wrap any other type
+        # of File inside a FieldFile (well, the field's attr_class, which is
+        # usually FieldFile).
+        elif isinstance(file, File) and not isinstance(file, FieldFile):
+            file_copy = self.field.attr_class(instance, self.field, file.name)
+            file_copy.file = file
+            file_copy._committed = False
+            instance.__dict__[self.field.name] = file_copy
+
+        # Finally, because of the (some would say boneheaded) way pickle works,
+        # the underlying FieldFile might not actually itself have an associated
+        # file. So we need to reset the details of the FieldFile in those cases.
+        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
+            file.instance = instance
+            file.field = self.field
+            file.storage = self.field.storage
+
+        # That was fun, wasn't it?
+        return instance.__dict__[self.field.name]
+
+    def __set__(self, instance, value):
+        instance.__dict__[self.field.name] = value
+
+class FileField(Field):
+
+    # The class to wrap instance attributes in. Accessing the file object off
+    # the instance will always return an instance of attr_class.
+    attr_class = FieldFile
+
+    # The descriptor to use for accessing the attribute off of the class.
+    descriptor_class = FileDescriptor
+
+    description = _("File")
+
+    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
+        for arg in ('primary_key', 'unique'):
+            if arg in kwargs:
+                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))
+
+        self.storage = storage or default_storage
+        self.upload_to = upload_to
+        if callable(upload_to):
+            self.generate_filename = upload_to
+
+        kwargs['max_length'] = kwargs.get('max_length', 100)
+        super(FileField, self).__init__(verbose_name, name, **kwargs)
+
+    def get_internal_type(self):
+        return "FileField"
+
+    def get_prep_lookup(self, lookup_type, value):
+        if hasattr(value, 'name'):
+            value = value.name
+        return super(FileField, self).get_prep_lookup(lookup_type, value)
+
+    def get_prep_value(self, value):
+        "Returns field's value prepared for saving into a database."
+        # Need to convert File objects provided via a form to unicode for database insertion
+        if value is None:
+            return None
+        return six.text_type(value)
+
+    def pre_save(self, model_instance, add):
+        "Returns field's value just before saving."
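+        # (The save=False passed below keeps FieldFile.save from calling
+        # instance.save() again; the caller is already mid-save.)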
+ file = super(FileField, self).pre_save(model_instance, add) + if file and not file._committed: + # Commit the file to storage prior to saving the model + file.save(file.name, file, save=False) + return file + + def contribute_to_class(self, cls, name): + super(FileField, self).contribute_to_class(cls, name) + setattr(cls, self.name, self.descriptor_class(self)) + + def get_directory_name(self): + return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to)))) + + def get_filename(self, filename): + return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename))) + + def generate_filename(self, instance, filename): + return os.path.join(self.get_directory_name(), self.get_filename(filename)) + + def save_form_data(self, instance, data): + # Important: None means "no change", other false value means "clear" + # This subtle distinction (rather than a more explicit marker) is + # needed because we need to consume values that are also sane for a + # regular (non Model-) Form to find in its cleaned_data dictionary. + if data is not None: + # This value will be converted to unicode and stored in the + # database, so leaving False as-is is not acceptable. + if not data: + data = '' + setattr(instance, self.name, data) + + def formfield(self, **kwargs): + defaults = {'form_class': forms.FileField, 'max_length': self.max_length} + # If a file has been provided previously, then the form doesn't require + # that a new file is provided this time. + # The code to mark the form field as not required is used by + # form_for_instance, but can probably be removed once form_for_instance + # is gone. ModelForm uses a different method to check for an existing file. + if 'initial' in kwargs: + defaults['required'] = False + defaults.update(kwargs) + return super(FileField, self).formfield(**defaults) + +class ImageFileDescriptor(FileDescriptor): + """ + Just like the FileDescriptor, but for ImageFields. The only difference is + assigning the width/height to the width_field/height_field, if appropriate. + """ + def __set__(self, instance, value): + previous_file = instance.__dict__.get(self.field.name) + super(ImageFileDescriptor, self).__set__(instance, value) + + # To prevent recalculating image dimensions when we are instantiating + # an object from the database (bug #11084), only update dimensions if + # the field had a value before this assignment. Since the default + # value for FileField subclasses is an instance of field.attr_class, + # previous_file will only be None when we are called from + # Model.__init__(). The ImageField.update_dimension_fields method + # hooked up to the post_init signal handles the Model.__init__() cases. + # Assignment happening outside of Model.__init__() will trigger the + # update right here. 
+        if previous_file is not None:
+            self.field.update_dimension_fields(instance, force=True)
+
+class ImageFieldFile(ImageFile, FieldFile):
+
+    def delete(self, save=True):
+        # Clear the image dimensions cache
+        if hasattr(self, '_dimensions_cache'):
+            del self._dimensions_cache
+        super(ImageFieldFile, self).delete(save)
+
+class ImageField(FileField):
+    attr_class = ImageFieldFile
+    descriptor_class = ImageFileDescriptor
+    description = _("Image")
+
+    def __init__(self, verbose_name=None, name=None, width_field=None,
+            height_field=None, **kwargs):
+        self.width_field, self.height_field = width_field, height_field
+        super(ImageField, self).__init__(verbose_name, name, **kwargs)
+
+    def contribute_to_class(self, cls, name):
+        super(ImageField, self).contribute_to_class(cls, name)
+        # Attach update_dimension_fields so that dimension fields declared
+        # after their corresponding image field don't stay cleared by
+        # Model.__init__, see bug #11196.
+        signals.post_init.connect(self.update_dimension_fields, sender=cls)
+
+    def update_dimension_fields(self, instance, force=False, *args, **kwargs):
+        """
+        Updates field's width and height fields, if defined.
+
+        This method is hooked up to model's post_init signal to update
+        dimensions after instantiating a model instance. However, dimensions
+        won't be updated if the dimension fields are already populated. This
+        avoids unnecessary recalculation when loading an object from the
+        database.
+
+        Dimensions can be forced to update with force=True, which is how
+        ImageFileDescriptor.__set__ calls this method.
+        """
+        # Nothing to update if the field doesn't have dimension fields.
+        has_dimension_fields = self.width_field or self.height_field
+        if not has_dimension_fields:
+            return
+
+        # getattr will call the ImageFileDescriptor's __get__ method, which
+        # coerces the assigned value into an instance of self.attr_class
+        # (ImageFieldFile in this case).
+        file = getattr(instance, self.attname)
+
+        # Nothing to update if we have no file and we're not being forced
+        # to update.
+        if not file and not force:
+            return
+
+        dimension_fields_filled = not (
+            (self.width_field and not getattr(instance, self.width_field))
+            or (self.height_field and not getattr(instance, self.height_field))
+        )
+        # When both dimension fields have values, we are most likely loading
+        # data from the database or updating an image field that already had
+        # an image stored. In the first case, we don't want to update the
+        # dimension fields because we are already getting their values from
+        # the database. In the second case, we do want to update the dimension
+        # fields and will skip this return because force will be True since we
+        # were called from ImageFileDescriptor.__set__.
+        if dimension_fields_filled and not force:
+            return
+
+        # file should be an instance of ImageFieldFile or should be None.
+        if file:
+            width = file.width
+            height = file.height
+        else:
+            # No file, so clear dimension fields.
+            width = None
+            height = None
+
+        # Update the width and height fields.
+ if self.width_field: + setattr(instance, self.width_field, width) + if self.height_field: + setattr(instance, self.height_field, height) + + def formfield(self, **kwargs): + defaults = {'form_class': forms.ImageField} + defaults.update(kwargs) + return super(ImageField, self).formfield(**defaults) diff --git a/lib/python2.7/site-packages/django/db/models/fields/proxy.py b/lib/python2.7/site-packages/django/db/models/fields/proxy.py new file mode 100644 index 0000000..c0cc873 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/fields/proxy.py @@ -0,0 +1,17 @@ +""" +Field-like classes that aren't really fields. It's easier to use objects that +have the same attributes as fields sometimes (avoids a lot of special casing). +""" + +from django.db.models import fields + +class OrderWrt(fields.IntegerField): + """ + A proxy for the _order database field that is used when + Meta.order_with_respect_to is specified. + """ + + def __init__(self, *args, **kwargs): + kwargs['name'] = '_order' + kwargs['editable'] = False + super(OrderWrt, self).__init__(*args, **kwargs) diff --git a/lib/python2.7/site-packages/django/db/models/fields/related.py b/lib/python2.7/site-packages/django/db/models/fields/related.py new file mode 100644 index 0000000..a4bc374 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/fields/related.py @@ -0,0 +1,1545 @@ +from operator import attrgetter + +from django.db import connection, connections, router +from django.db.backends import util +from django.db.models import signals, get_model +from django.db.models.fields import (AutoField, Field, IntegerField, + PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist) +from django.db.models.related import RelatedObject, PathInfo +from django.db.models.query import QuerySet +from django.db.models.deletion import CASCADE +from django.utils.encoding import smart_text +from django.utils import six +from django.utils.deprecation import RenameMethodsBase +from django.utils.translation import ugettext_lazy as _ +from django.utils.functional import curry, cached_property +from django.core import exceptions +from django import forms + +RECURSIVE_RELATIONSHIP_CONSTANT = 'self' + +pending_lookups = {} + + +def add_lazy_relation(cls, field, relation, operation): + """ + Adds a lookup on ``cls`` when a related field is defined using a string, + i.e.:: + + class MyModel(Model): + fk = ForeignKey("AnotherModel") + + This string can be: + + * RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive + relation. + + * The name of a model (i.e "AnotherModel") to indicate another model in + the same app. + + * An app-label and model name (i.e. "someapp.AnotherModel") to indicate + another model in a different app. + + If the other model hasn't yet been loaded -- almost a given if you're using + lazy relationships -- then the relation won't be set up until the + class_prepared signal fires at the end of model initialization. + + operation is the work that must be performed once the relation can be resolved. 
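+
+    As a sketch, reusing the hypothetical models above: if AnotherModel
+    hasn't been loaded when MyModel is defined, the (app_label, model_name)
+    pair is parked in ``pending_lookups``, and
+    ``operation(field, AnotherModel, MyModel)`` only runs once
+    AnotherModel's class_prepared signal has fired.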
+ """ + # Check for recursive relations + if relation == RECURSIVE_RELATIONSHIP_CONSTANT: + app_label = cls._meta.app_label + model_name = cls.__name__ + + else: + # Look for an "app.Model" relation + + if isinstance(relation, six.string_types): + try: + app_label, model_name = relation.split(".") + except ValueError: + # If we can't split, assume a model in current app + app_label = cls._meta.app_label + model_name = relation + else: + # it's actually a model class + app_label = relation._meta.app_label + model_name = relation._meta.object_name + + # Try to look up the related model, and if it's already loaded resolve the + # string right away. If get_model returns None, it means that the related + # model isn't loaded yet, so we need to pend the relation until the class + # is prepared. + model = get_model(app_label, model_name, + seed_cache=False, only_installed=False) + if model: + operation(field, model, cls) + else: + key = (app_label, model_name) + value = (cls, field, operation) + pending_lookups.setdefault(key, []).append(value) + + +def do_pending_lookups(sender, **kwargs): + """ + Handle any pending relations to the sending model. Sent from class_prepared. + """ + key = (sender._meta.app_label, sender.__name__) + for cls, field, operation in pending_lookups.pop(key, []): + operation(field, sender, cls) + +signals.class_prepared.connect(do_pending_lookups) + + +#HACK +class RelatedField(Field): + def db_type(self, connection): + '''By default related field will not have a column + as it relates columns to another table''' + return None + + def contribute_to_class(self, cls, name, virtual_only=False): + sup = super(RelatedField, self) + + # Store the opts for related_query_name() + self.opts = cls._meta + + if hasattr(sup, 'contribute_to_class'): + sup.contribute_to_class(cls, name, virtual_only=virtual_only) + + if not cls._meta.abstract and self.rel.related_name: + related_name = self.rel.related_name % { + 'class': cls.__name__.lower(), + 'app_label': cls._meta.app_label.lower() + } + self.rel.related_name = related_name + other = self.rel.to + if isinstance(other, six.string_types) or other._meta.pk is None: + def resolve_related_class(field, model, cls): + field.rel.to = model + field.do_related_class(model, cls) + add_lazy_relation(cls, self, other, resolve_related_class) + else: + self.do_related_class(other, cls) + + def set_attributes_from_rel(self): + self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name) + if self.verbose_name is None: + self.verbose_name = self.rel.to._meta.verbose_name + self.rel.set_field_name() + + def do_related_class(self, other, cls): + self.set_attributes_from_rel() + self.related = RelatedObject(other, cls, self) + if not cls._meta.abstract: + self.contribute_to_related_class(other, self.related) + + def related_query_name(self): + # This method defines the name that can be used to identify this + # related object in a table-spanning query. It uses the lower-cased + # object_name by default, but this can be overridden with the + # "related_name" option. 
+ return self.rel.related_query_name or self.rel.related_name or self.opts.model_name + + +class RenameRelatedObjectDescriptorMethods(RenameMethodsBase): + renamed_methods = ( + ('get_query_set', 'get_queryset', PendingDeprecationWarning), + ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning), + ) + + +class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)): + # This class provides the functionality that makes the related-object + # managers available as attributes on a model class, for fields that have + # a single "remote" value, on the class pointed to by a related field. + # In the example "place.restaurant", the restaurant attribute is a + # SingleRelatedObjectDescriptor instance. + def __init__(self, related): + self.related = related + self.cache_name = related.get_cache_name() + + def is_cached(self, instance): + return hasattr(instance, self.cache_name) + + def get_queryset(self, **db_hints): + db = router.db_for_read(self.related.model, **db_hints) + return self.related.model._base_manager.using(db) + + def get_prefetch_queryset(self, instances): + rel_obj_attr = attrgetter(self.related.field.attname) + instance_attr = lambda obj: obj._get_pk_val() + instances_dict = dict((instance_attr(inst), inst) for inst in instances) + query = {'%s__in' % self.related.field.name: instances} + qs = self.get_queryset(instance=instances[0]).filter(**query) + # Since we're going to assign directly in the cache, + # we must manage the reverse relation cache manually. + rel_obj_cache_name = self.related.field.get_cache_name() + for rel_obj in qs: + instance = instances_dict[rel_obj_attr(rel_obj)] + setattr(rel_obj, rel_obj_cache_name, instance) + return qs, rel_obj_attr, instance_attr, True, self.cache_name + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + try: + rel_obj = getattr(instance, self.cache_name) + except AttributeError: + related_pk = instance._get_pk_val() + if related_pk is None: + rel_obj = None + else: + params = {} + for lh_field, rh_field in self.related.field.related_fields: + params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname) + try: + rel_obj = self.get_queryset(instance=instance).get(**params) + except self.related.model.DoesNotExist: + rel_obj = None + else: + setattr(rel_obj, self.related.field.get_cache_name(), instance) + setattr(instance, self.cache_name, rel_obj) + if rel_obj is None: + raise self.related.model.DoesNotExist("%s has no %s." % ( + instance.__class__.__name__, + self.related.get_accessor_name())) + else: + return rel_obj + + def __set__(self, instance, value): + # The similarity of the code below to the code in + # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch + # of small differences that would make a common base class convoluted. + + # If null=True, we can assign null here, but otherwise the value needs + # to be an instance of the related class. + if value is None and self.related.field.null == False: + raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' % + (instance._meta.object_name, self.related.get_accessor_name())) + elif value is not None and not isinstance(value, self.related.model): + raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' 
% + (value, instance._meta.object_name, + self.related.get_accessor_name(), self.related.opts.object_name)) + elif value is not None: + if instance._state.db is None: + instance._state.db = router.db_for_write(instance.__class__, instance=value) + elif value._state.db is None: + value._state.db = router.db_for_write(value.__class__, instance=instance) + elif value._state.db is not None and instance._state.db is not None: + if not router.allow_relation(value, instance): + raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) + + related_pk = tuple([getattr(instance, field.attname) for field in self.related.field.foreign_related_fields]) + if None in related_pk: + raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' % + (value, instance._meta.object_name)) + + # Set the value of the related field to the value of the related object's related field + for index, field in enumerate(self.related.field.local_related_fields): + setattr(value, field.attname, related_pk[index]) + + # Since we already know what the related object is, seed the related + # object caches now, too. This avoids another db hit if you get the + # object you just set. + setattr(instance, self.cache_name, value) + setattr(value, self.related.field.get_cache_name(), instance) + + +class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)): + # This class provides the functionality that makes the related-object + # managers available as attributes on a model class, for fields that have + # a single "remote" value, on the class that defines the related field. + # In the example "choice.poll", the poll attribute is a + # ReverseSingleRelatedObjectDescriptor instance. + def __init__(self, field_with_rel): + self.field = field_with_rel + self.cache_name = self.field.get_cache_name() + + def is_cached(self, instance): + return hasattr(instance, self.cache_name) + + def get_queryset(self, **db_hints): + db = router.db_for_read(self.field.rel.to, **db_hints) + rel_mgr = self.field.rel.to._default_manager + # If the related manager indicates that it should be used for + # related fields, respect that. + if getattr(rel_mgr, 'use_for_related_fields', False): + return rel_mgr.using(db) + else: + return QuerySet(self.field.rel.to).using(db) + + def get_prefetch_queryset(self, instances): + rel_obj_attr = self.field.get_foreign_related_value + instance_attr = self.field.get_local_related_value + instances_dict = dict((instance_attr(inst), inst) for inst in instances) + related_field = self.field.foreign_related_fields[0] + + # FIXME: This will need to be revisited when we introduce support for + # composite fields. In the meantime we take this practical approach to + # solve a regression on 1.6 when the reverse manager in hidden + # (related_name ends with a '+'). Refs #21410. + # The check for len(...) == 1 is a special case that allows the query + # to be join-less and smaller. Refs #21760. + if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1: + query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)} + else: + query = {'%s__in' % self.field.related_query_name(): instances} + + qs = self.get_queryset(instance=instances[0]).filter(**query) + # Since we're going to assign directly in the cache, + # we must manage the reverse relation cache manually. 
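+        # Sketch of the call site this serves (hypothetical models):
+        #
+        #     for choice in Choice.objects.prefetch_related('poll'):
+        #         choice.poll  # populated from the queryset built above,
+        #                      # without a per-row query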
+ if not self.field.rel.multiple: + rel_obj_cache_name = self.field.related.get_cache_name() + for rel_obj in qs: + instance = instances_dict[rel_obj_attr(rel_obj)] + setattr(rel_obj, rel_obj_cache_name, instance) + return qs, rel_obj_attr, instance_attr, True, self.cache_name + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + try: + rel_obj = getattr(instance, self.cache_name) + except AttributeError: + val = self.field.get_local_related_value(instance) + if None in val: + rel_obj = None + else: + params = dict( + (rh_field.attname, getattr(instance, lh_field.attname)) + for lh_field, rh_field in self.field.related_fields) + qs = self.get_queryset(instance=instance) + extra_filter = self.field.get_extra_descriptor_filter(instance) + if isinstance(extra_filter, dict): + params.update(extra_filter) + qs = qs.filter(**params) + else: + qs = qs.filter(extra_filter, **params) + # Assuming the database enforces foreign keys, this won't fail. + rel_obj = qs.get() + if not self.field.rel.multiple: + setattr(rel_obj, self.field.related.get_cache_name(), instance) + setattr(instance, self.cache_name, rel_obj) + if rel_obj is None and not self.field.null: + raise self.field.rel.to.DoesNotExist( + "%s has no %s." % (self.field.model.__name__, self.field.name)) + else: + return rel_obj + + def __set__(self, instance, value): + # If null=True, we can assign null here, but otherwise the value needs + # to be an instance of the related class. + if value is None and self.field.null == False: + raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' % + (instance._meta.object_name, self.field.name)) + elif value is not None and not isinstance(value, self.field.rel.to): + raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' % + (value, instance._meta.object_name, + self.field.name, self.field.rel.to._meta.object_name)) + elif value is not None: + if instance._state.db is None: + instance._state.db = router.db_for_write(instance.__class__, instance=value) + elif value._state.db is None: + value._state.db = router.db_for_write(value.__class__, instance=instance) + elif value._state.db is not None and instance._state.db is not None: + if not router.allow_relation(value, instance): + raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) + + # If we're setting the value of a OneToOneField to None, we need to clear + # out the cache on any old related object. Otherwise, deleting the + # previously-related object will also cause this object to be deleted, + # which is wrong. + if value is None: + # Look up the previously-related object, which may still be available + # since we've not yet cleared out the related field. + # Use the cache directly, instead of the accessor; if we haven't + # populated the cache, then we don't care - we're only accessing + # the object to invalidate the accessor cache, so there's no + # need to populate the cache just to expire it again. + related = getattr(instance, self.cache_name, None) + + # If we've got an old related object, we need to clear out its + # cache. This cache also might not exist if the related object + # hasn't been accessed yet. 
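+            # Concretely (hypothetical one-to-one): after
+            #
+            #     restaurant.place = None
+            #
+            # the old Place must stop caching this Restaurant, or a later
+            # place.delete() would still cascade to it.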
+ if related is not None: + setattr(related, self.field.related.get_cache_name(), None) + + # Set the value of the related field + for lh_field, rh_field in self.field.related_fields: + try: + setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) + except AttributeError: + setattr(instance, lh_field.attname, None) + + # Since we already know what the related object is, seed the related + # object caches now, too. This avoids another db hit if you get the + # object you just set. + setattr(instance, self.cache_name, value) + if value is not None and not self.field.rel.multiple: + setattr(value, self.field.related.get_cache_name(), instance) + + +class ForeignRelatedObjectsDescriptor(object): + # This class provides the functionality that makes the related-object + # managers available as attributes on a model class, for fields that have + # multiple "remote" values and have a ForeignKey pointed at them by + # some other model. In the example "poll.choice_set", the choice_set + # attribute is a ForeignRelatedObjectsDescriptor instance. + def __init__(self, related): + self.related = related # RelatedObject instance + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + + return self.related_manager_cls(instance) + + def __set__(self, instance, value): + manager = self.__get__(instance) + # If the foreign key can support nulls, then completely clear the related set. + # Otherwise, just move the named objects into the set. + if self.related.field.null: + manager.clear() + manager.add(*value) + + @cached_property + def related_manager_cls(self): + # Dynamically create a class that subclasses the related model's default + # manager. + superclass = self.related.model._default_manager.__class__ + rel_field = self.related.field + rel_model = self.related.model + + class RelatedManager(superclass): + def __init__(self, instance): + super(RelatedManager, self).__init__() + self.instance = instance + self.core_filters= {'%s__exact' % rel_field.name: instance} + self.model = rel_model + + def get_queryset(self): + try: + return self.instance._prefetched_objects_cache[rel_field.related_query_name()] + except (AttributeError, KeyError): + db = self._db or router.db_for_read(self.model, instance=self.instance) + qs = super(RelatedManager, self).get_queryset().using(db).filter(**self.core_filters) + empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls + for field in rel_field.foreign_related_fields: + val = getattr(self.instance, field.attname) + if val is None or (val == '' and empty_strings_as_null): + return qs.none() + qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}} + return qs + + def get_prefetch_queryset(self, instances): + rel_obj_attr = rel_field.get_local_related_value + instance_attr = rel_field.get_foreign_related_value + instances_dict = dict((instance_attr(inst), inst) for inst in instances) + db = self._db or router.db_for_read(self.model, instance=instances[0]) + query = {'%s__in' % rel_field.name: instances} + qs = super(RelatedManager, self).get_queryset().using(db).filter(**query) + # Since we just bypassed this class' get_queryset(), we must manage + # the reverse relation manually. 
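+                # e.g. (hypothetical models) this is what serves
+                #
+                #     polls = Poll.objects.prefetch_related('choice_set')
+                #     polls[0].choice_set.all()[0].poll  # no extra query
+                #
+                # The loop below seeds each prefetched choice's .poll with
+                # its owning poll.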
+ for rel_obj in qs: + instance = instances_dict[rel_obj_attr(rel_obj)] + setattr(rel_obj, rel_field.name, instance) + cache_name = rel_field.related_query_name() + return qs, rel_obj_attr, instance_attr, False, cache_name + + def add(self, *objs): + for obj in objs: + if not isinstance(obj, self.model): + raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)) + setattr(obj, rel_field.name, self.instance) + obj.save() + add.alters_data = True + + def create(self, **kwargs): + kwargs[rel_field.name] = self.instance + db = router.db_for_write(self.model, instance=self.instance) + return super(RelatedManager, self.db_manager(db)).create(**kwargs) + create.alters_data = True + + def get_or_create(self, **kwargs): + # Update kwargs with the related object that this + # ForeignRelatedObjectsDescriptor knows about. + kwargs[rel_field.name] = self.instance + db = router.db_for_write(self.model, instance=self.instance) + return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) + get_or_create.alters_data = True + + # remove() and clear() are only provided if the ForeignKey can have a value of null. + if rel_field.null: + def remove(self, *objs): + val = rel_field.get_foreign_related_value(self.instance) + for obj in objs: + # Is obj actually part of this descriptor set? + if rel_field.get_local_related_value(obj) == val: + setattr(obj, rel_field.name, None) + obj.save() + else: + raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance)) + remove.alters_data = True + + def clear(self): + self.update(**{rel_field.name: None}) + clear.alters_data = True + + return RelatedManager + + +def create_many_related_manager(superclass, rel): + """Creates a manager that subclasses 'superclass' (which is a Manager) + and adds behavior for many-to-many related objects.""" + class ManyRelatedManager(superclass): + def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None, + source_field_name=None, target_field_name=None, reverse=False, + through=None, prefetch_cache_name=None): + super(ManyRelatedManager, self).__init__() + self.model = model + self.query_field_name = query_field_name + + source_field = through._meta.get_field(source_field_name) + source_related_fields = source_field.related_fields + + self.core_filters = {} + for lh_field, rh_field in source_related_fields: + self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname) + + self.instance = instance + self.symmetrical = symmetrical + self.source_field = source_field + self.source_field_name = source_field_name + self.target_field_name = target_field_name + self.reverse = reverse + self.through = through + self.prefetch_cache_name = prefetch_cache_name + self.related_val = source_field.get_foreign_related_value(instance) + # Used for single column related auto created models + self._fk_val = self.related_val[0] + if None in self.related_val: + raise ValueError('"%r" needs to have a value for field "%s" before ' + 'this many-to-many relationship can be used.' % + (instance, source_field_name)) + # Even if this relation is not to pk, we require still pk value. + # The wish is that the instance has been already saved to DB, + # although having a pk value isn't a guarantee of that. + if instance.pk is None: + raise ValueError("%r instance needs to have a primary key value before " + "a many-to-many relationship can be used." 
% + instance.__class__.__name__) + + + def _get_fk_val(self, obj, field_name): + """ + Returns the correct value for this relationship's foreign key. This + might be something else than pk value when to_field is used. + """ + fk = self.through._meta.get_field(field_name) + if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname: + attname = fk.rel.get_related_field().get_attname() + return fk.get_prep_lookup('exact', getattr(obj, attname)) + else: + return obj.pk + + def get_queryset(self): + try: + return self.instance._prefetched_objects_cache[self.prefetch_cache_name] + except (AttributeError, KeyError): + db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance) + return super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**self.core_filters) + + def get_prefetch_queryset(self, instances): + instance = instances[0] + db = self._db or router.db_for_read(instance.__class__, instance=instance) + query = {'%s__in' % self.query_field_name: instances} + qs = super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**query) + + # M2M: need to annotate the query in order to get the primary model + # that the secondary model was actually related to. We know that + # there will already be a join on the join table, so we can just add + # the select. + + # For non-autocreated 'through' models, can't assume we are + # dealing with PK values. + fk = self.through._meta.get_field(self.source_field_name) + join_table = self.through._meta.db_table + connection = connections[db] + qn = connection.ops.quote_name + qs = qs.extra(select=dict( + ('_prefetch_related_val_%s' % f.attname, + '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields)) + return (qs, + lambda result: tuple([getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields]), + lambda inst: tuple([getattr(inst, f.attname) for f in fk.foreign_related_fields]), + False, + self.prefetch_cache_name) + + # If the ManyToMany relation has an intermediary model, + # the add and remove methods do not exist. + if rel.through._meta.auto_created: + def add(self, *objs): + self._add_items(self.source_field_name, self.target_field_name, *objs) + + # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table + if self.symmetrical: + self._add_items(self.target_field_name, self.source_field_name, *objs) + add.alters_data = True + + def remove(self, *objs): + self._remove_items(self.source_field_name, self.target_field_name, *objs) + + # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table + if self.symmetrical: + self._remove_items(self.target_field_name, self.source_field_name, *objs) + remove.alters_data = True + + def clear(self): + self._clear_items(self.source_field_name) + + # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table + if self.symmetrical: + self._clear_items(self.target_field_name) + clear.alters_data = True + + def create(self, **kwargs): + # This check needs to be done here, since we can't later remove this + # from the method lookup table, as we do with add and remove. + if not self.through._meta.auto_created: + opts = self.through._meta + raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." 
% (opts.app_label, opts.object_name)) + db = router.db_for_write(self.instance.__class__, instance=self.instance) + new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) + self.add(new_obj) + return new_obj + create.alters_data = True + + def get_or_create(self, **kwargs): + db = router.db_for_write(self.instance.__class__, instance=self.instance) + obj, created = \ + super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) + # We only need to add() if created because if we got an object back + # from get() then the relationship already exists. + if created: + self.add(obj) + return obj, created + get_or_create.alters_data = True + + def _add_items(self, source_field_name, target_field_name, *objs): + # source_field_name: the PK fieldname in join table for the source object + # target_field_name: the PK fieldname in join table for the target object + # *objs - objects to add. Either object instances, or primary keys of object instances. + + # If there aren't any objects, there is nothing to do. + from django.db.models import Model + if objs: + new_ids = set() + for obj in objs: + if isinstance(obj, self.model): + if not router.allow_relation(obj, self.instance): + raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' % + (obj, self.instance._state.db, obj._state.db)) + fk_val = self._get_fk_val(obj, target_field_name) + if fk_val is None: + raise ValueError('Cannot add "%r": the value for field "%s" is None' % + (obj, target_field_name)) + new_ids.add(self._get_fk_val(obj, target_field_name)) + elif isinstance(obj, Model): + raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj)) + else: + new_ids.add(obj) + db = router.db_for_write(self.through, instance=self.instance) + vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True) + vals = vals.filter(**{ + source_field_name: self._fk_val, + '%s__in' % target_field_name: new_ids, + }) + new_ids = new_ids - set(vals) + + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are inserting the + # duplicate data row for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action='pre_add', + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=new_ids, using=db) + # Add the ones that aren't there already + self.through._default_manager.using(db).bulk_create([ + self.through(**{ + '%s_id' % source_field_name: self._fk_val, + '%s_id' % target_field_name: obj_id, + }) + for obj_id in new_ids + ]) + + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are inserting the + # duplicate data row for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action='post_add', + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=new_ids, using=db) + + def _remove_items(self, source_field_name, target_field_name, *objs): + # source_field_name: the PK colname in join table for the source object + # target_field_name: the PK colname in join table for the target object + # *objs - objects to remove + + # If there aren't any objects, there is nothing to do. 
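+            # Like add(), remove() accepts instances or raw primary keys
+            # (hypothetical m2m):
+            #
+            #     article.publications.remove(pub)      # model instance
+            #     article.publications.remove(pub.pk)   # raw primary key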
+ if objs: + # Check that all the objects are of the right type + old_ids = set() + for obj in objs: + if isinstance(obj, self.model): + old_ids.add(self._get_fk_val(obj, target_field_name)) + else: + old_ids.add(obj) + # Work out what DB we're operating on + db = router.db_for_write(self.through, instance=self.instance) + # Send a signal to the other end if need be. + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are deleting the + # duplicate data row for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action="pre_remove", + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=old_ids, using=db) + # Remove the specified objects from the join table + self.through._default_manager.using(db).filter(**{ + source_field_name: self._fk_val, + '%s__in' % target_field_name: old_ids + }).delete() + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are deleting the + # duplicate data row for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action="post_remove", + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=old_ids, using=db) + + def _clear_items(self, source_field_name): + db = router.db_for_write(self.through, instance=self.instance) + # source_field_name: the PK colname in join table for the source object + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are clearing the + # duplicate data rows for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action="pre_clear", + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=None, using=db) + self.through._default_manager.using(db).filter(**{ + source_field_name: self.related_val + }).delete() + if self.reverse or source_field_name == self.source_field_name: + # Don't send the signal when we are clearing the + # duplicate data rows for symmetrical reverse entries. + signals.m2m_changed.send(sender=self.through, action="post_clear", + instance=self.instance, reverse=self.reverse, + model=self.model, pk_set=None, using=db) + + return ManyRelatedManager + + +class ManyRelatedObjectsDescriptor(object): + # This class provides the functionality that makes the related-object + # managers available as attributes on a model class, for fields that have + # multiple "remote" values and have a ManyToManyField pointed at them by + # some other model (rather than having a ManyToManyField themselves). + # In the example "publication.article_set", the article_set attribute is a + # ManyRelatedObjectsDescriptor instance. + def __init__(self, related): + self.related = related # RelatedObject instance + + @cached_property + def related_manager_cls(self): + # Dynamically create a class that subclasses the related + # model's default manager. 
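+        # e.g. for publication.article_set, this mixes the m2m behavior
+        # into Article's default manager class, so both
+        #
+        #     publication.article_set.all()
+        #     publication.article_set.add(article)
+        #
+        # are served by the class built here.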
+ return create_many_related_manager( + self.related.model._default_manager.__class__, + self.related.field.rel + ) + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + + rel_model = self.related.model + + manager = self.related_manager_cls( + model=rel_model, + query_field_name=self.related.field.name, + prefetch_cache_name=self.related.field.related_query_name(), + instance=instance, + symmetrical=False, + source_field_name=self.related.field.m2m_reverse_field_name(), + target_field_name=self.related.field.m2m_field_name(), + reverse=True, + through=self.related.field.rel.through, + ) + + return manager + + def __set__(self, instance, value): + if not self.related.field.rel.through._meta.auto_created: + opts = self.related.field.rel.through._meta + raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name)) + + manager = self.__get__(instance) + manager.clear() + manager.add(*value) + + +class ReverseManyRelatedObjectsDescriptor(object): + # This class provides the functionality that makes the related-object + # managers available as attributes on a model class, for fields that have + # multiple "remote" values and have a ManyToManyField defined in their + # model (rather than having another model pointed *at* them). + # In the example "article.publications", the publications attribute is a + # ReverseManyRelatedObjectsDescriptor instance. + def __init__(self, m2m_field): + self.field = m2m_field + + @property + def through(self): + # through is provided so that you have easy access to the through + # model (Book.authors.through) for inlines, etc. This is done as + # a property to ensure that the fully resolved value is returned. + return self.field.rel.through + + @cached_property + def related_manager_cls(self): + # Dynamically create a class that subclasses the related model's + # default manager. + return create_many_related_manager( + self.field.rel.to._default_manager.__class__, + self.field.rel + ) + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + + manager = self.related_manager_cls( + model=self.field.rel.to, + query_field_name=self.field.related_query_name(), + prefetch_cache_name=self.field.name, + instance=instance, + symmetrical=self.field.rel.symmetrical, + source_field_name=self.field.m2m_field_name(), + target_field_name=self.field.m2m_reverse_field_name(), + reverse=False, + through=self.field.rel.through, + ) + + return manager + + def __set__(self, instance, value): + if not self.field.rel.through._meta.auto_created: + opts = self.field.rel.through._meta + raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." 
% (opts.app_label, opts.object_name)) + + manager = self.__get__(instance) + # clear() can change expected output of 'value' queryset, we force evaluation + # of queryset before clear; ticket #19816 + value = tuple(value) + manager.clear() + manager.add(*value) + +class ForeignObjectRel(object): + def __init__(self, field, to, related_name=None, limit_choices_to=None, + parent_link=False, on_delete=None, related_query_name=None): + try: + to._meta + except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT + assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT + + self.field = field + self.to = to + self.related_name = related_name + self.related_query_name = related_query_name + self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to + self.multiple = True + self.parent_link = parent_link + self.on_delete = on_delete + + def is_hidden(self): + "Should the related object be hidden?" + return self.related_name and self.related_name[-1] == '+' + + def get_joining_columns(self): + return self.field.get_reverse_joining_columns() + + def get_extra_restriction(self, where_class, alias, related_alias): + return self.field.get_extra_restriction(where_class, related_alias, alias) + + def set_field_name(self): + """ + Sets the related field's name, this is not available until later stages + of app loading, so set_field_name is called from + set_attributes_from_rel() + """ + # By default foreign object doesn't relate to any remote field (for + # example custom multicolumn joins currently have no remote field). + self.field_name = None + +class ManyToOneRel(ForeignObjectRel): + def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None, + parent_link=False, on_delete=None, related_query_name=None): + super(ManyToOneRel, self).__init__( + field, to, related_name=related_name, limit_choices_to=limit_choices_to, + parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name) + self.field_name = field_name + + def get_related_field(self): + """ + Returns the Field in the 'to' object to which this relationship is + tied. 
+ """ + data = self.to._meta.get_field_by_name(self.field_name) + if not data[2]: + raise FieldDoesNotExist("No related field named '%s'" % + self.field_name) + return data[0] + + def set_field_name(self): + self.field_name = self.field_name or self.to._meta.pk.name + + +class OneToOneRel(ManyToOneRel): + def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None, + parent_link=False, on_delete=None, related_query_name=None): + super(OneToOneRel, self).__init__(field, to, field_name, + related_name=related_name, limit_choices_to=limit_choices_to, + parent_link=parent_link, on_delete=on_delete, related_query_name=related_query_name, + ) + self.multiple = False + + +class ManyToManyRel(object): + def __init__(self, to, related_name=None, limit_choices_to=None, + symmetrical=True, through=None, db_constraint=True, related_query_name=None): + if through and not db_constraint: + raise ValueError("Can't supply a through model and db_constraint=False") + self.to = to + self.related_name = related_name + self.related_query_name = related_query_name + if limit_choices_to is None: + limit_choices_to = {} + self.limit_choices_to = limit_choices_to + self.symmetrical = symmetrical + self.multiple = True + self.through = through + self.db_constraint = db_constraint + + def is_hidden(self): + "Should the related object be hidden?" + return self.related_name and self.related_name[-1] == '+' + + def get_related_field(self): + """ + Returns the field in the to' object to which this relationship is tied + (this is always the primary key on the target model). Provided for + symmetry with ManyToOneRel. + """ + return self.to._meta.pk + + +class ForeignObject(RelatedField): + requires_unique_target = True + generate_reverse_relation = True + + def __init__(self, to, from_fields, to_fields, **kwargs): + self.from_fields = from_fields + self.to_fields = to_fields + + if 'rel' not in kwargs: + kwargs['rel'] = ForeignObjectRel( + self, to, + related_name=kwargs.pop('related_name', None), + related_query_name=kwargs.pop('related_query_name', None), + limit_choices_to=kwargs.pop('limit_choices_to', None), + parent_link=kwargs.pop('parent_link', False), + on_delete=kwargs.pop('on_delete', CASCADE), + ) + kwargs['verbose_name'] = kwargs.get('verbose_name', None) + + super(ForeignObject, self).__init__(**kwargs) + + def resolve_related_fields(self): + if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields): + raise ValueError('Foreign Object from and to fields must be the same non-zero length') + related_fields = [] + for index in range(len(self.from_fields)): + from_field_name = self.from_fields[index] + to_field_name = self.to_fields[index] + from_field = (self if from_field_name == 'self' + else self.opts.get_field_by_name(from_field_name)[0]) + to_field = (self.rel.to._meta.pk if to_field_name is None + else self.rel.to._meta.get_field_by_name(to_field_name)[0]) + related_fields.append((from_field, to_field)) + return related_fields + + @property + def related_fields(self): + if not hasattr(self, '_related_fields'): + self._related_fields = self.resolve_related_fields() + return self._related_fields + + @property + def reverse_related_fields(self): + return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] + + @property + def local_related_fields(self): + return tuple([lhs_field for lhs_field, rhs_field in self.related_fields]) + + @property + def foreign_related_fields(self): + return tuple([rhs_field for lhs_field, rhs_field in 
self.related_fields]) + + def get_local_related_value(self, instance): + return self.get_instance_value_for_fields(instance, self.local_related_fields) + + def get_foreign_related_value(self, instance): + return self.get_instance_value_for_fields(instance, self.foreign_related_fields) + + @staticmethod + def get_instance_value_for_fields(instance, fields): + ret = [] + for field in fields: + # Gotcha: in some cases (like fixture loading) a model can have + # different values in parent_ptr_id and parent's id. So, use + # instance.pk (that is, parent_ptr_id) when asked for instance.id. + if field.primary_key: + ret.append(instance.pk) + else: + ret.append(getattr(instance, field.attname)) + return tuple(ret) + + def get_attname_column(self): + attname, column = super(ForeignObject, self).get_attname_column() + return attname, None + + def get_joining_columns(self, reverse_join=False): + source = self.reverse_related_fields if reverse_join else self.related_fields + return tuple([(lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source]) + + def get_reverse_joining_columns(self): + return self.get_joining_columns(reverse_join=True) + + def get_extra_descriptor_filter(self, instance): + """ + Returns an extra filter condition for related object fetching when + user does 'instance.fieldname', that is the extra filter is used in + the descriptor of the field. + + The filter should be either a dict usable in .filter(**kwargs) call or + a Q-object. The condition will be ANDed together with the relation's + joining columns. + + A parallel method is get_extra_restriction() which is used in + JOIN and subquery conditions. + """ + return {} + + def get_extra_restriction(self, where_class, alias, related_alias): + """ + Returns a pair condition used for joining and subquery pushdown. The + condition is something that responds to as_sql(qn, connection) method. + + Note that currently referring both the 'alias' and 'related_alias' + will not work in some conditions, like subquery pushdown. + + A parallel method is get_extra_descriptor_filter() which is used in + instance.fieldname related object fetching. + """ + return None + + def get_path_info(self): + """ + Get path from this field to the related model. + """ + opts = self.rel.to._meta + from_opts = self.model._meta + return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)] + + def get_reverse_path_info(self): + """ + Get path from the related model to this field's model. 
+ """ + opts = self.model._meta + from_opts = self.rel.to._meta + pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)] + return pathinfos + + def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type, + raw_value): + from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR + root_constraint = constraint_class() + assert len(targets) == len(sources) + + def get_normalized_value(value): + + from django.db.models import Model + if isinstance(value, Model): + value_list = [] + for source in sources: + # Account for one-to-one relations when sent a different model + while not isinstance(value, source.model) and source.rel: + source = source.rel.to._meta.get_field(source.rel.field_name) + value_list.append(getattr(value, source.attname)) + return tuple(value_list) + elif not isinstance(value, tuple): + return (value,) + return value + + is_multicolumn = len(self.related_fields) > 1 + if (hasattr(raw_value, '_as_sql') or + hasattr(raw_value, 'get_compiler')): + root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets], + [source.name for source in sources], raw_value), + AND) + elif lookup_type == 'isnull': + root_constraint.add( + (Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND) + elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte'] + and not is_multicolumn)): + value = get_normalized_value(raw_value) + for index, source in enumerate(sources): + root_constraint.add( + (Constraint(alias, targets[index].column, sources[index]), lookup_type, + value[index]), AND) + elif lookup_type in ['range', 'in'] and not is_multicolumn: + values = [get_normalized_value(value) for value in raw_value] + value = [val[0] for val in values] + root_constraint.add( + (Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND) + elif lookup_type == 'in': + values = [get_normalized_value(value) for value in raw_value] + for value in values: + value_constraint = constraint_class() + for index, target in enumerate(targets): + value_constraint.add( + (Constraint(alias, target.column, sources[index]), 'exact', value[index]), + AND) + root_constraint.add(value_constraint, OR) + else: + raise TypeError('Related Field got invalid lookup: %s' % lookup_type) + return root_constraint + + @property + def attnames(self): + return tuple([field.attname for field in self.local_related_fields]) + + def get_defaults(self): + return tuple([field.get_default() for field in self.local_related_fields]) + + def contribute_to_class(self, cls, name, virtual_only=False): + super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only) + setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self)) + + def contribute_to_related_class(self, cls, related): + # Internal FK's - i.e., those with a related name ending with '+' - + # and swapped models don't get a related descriptor. 
+ if not self.rel.is_hidden() and not related.model._meta.swapped: + setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related)) + if self.rel.limit_choices_to: + cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to) + + +class ForeignKey(ForeignObject): + empty_strings_allowed = False + default_error_messages = { + 'invalid': _('%(model)s instance with pk %(pk)r does not exist.') + } + description = _("Foreign Key (type determined by related field)") + + def __init__(self, to, to_field=None, rel_class=ManyToOneRel, + db_constraint=True, **kwargs): + try: + to_name = to._meta.object_name.lower() + except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT + assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) + else: + assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name) + # For backwards compatibility purposes, we need to *try* and set + # the to_field during FK construction. It won't be guaranteed to + # be correct until contribute_to_class is called. Refs #12190. + to_field = to_field or (to._meta.pk and to._meta.pk.name) + + if 'db_index' not in kwargs: + kwargs['db_index'] = True + + self.db_constraint = db_constraint + + kwargs['rel'] = rel_class( + self, to, to_field, + related_name=kwargs.pop('related_name', None), + related_query_name=kwargs.pop('related_query_name', None), + limit_choices_to=kwargs.pop('limit_choices_to', None), + parent_link=kwargs.pop('parent_link', False), + on_delete=kwargs.pop('on_delete', CASCADE), + ) + super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs) + + @property + def related_field(self): + return self.foreign_related_fields[0] + + def get_reverse_path_info(self): + """ + Get path from the related model to this field's model. + """ + opts = self.model._meta + from_opts = self.rel.to._meta + pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)] + return pathinfos + + def validate(self, value, model_instance): + if self.rel.parent_link: + return + super(ForeignKey, self).validate(value, model_instance) + if value is None: + return + + using = router.db_for_read(model_instance.__class__, instance=model_instance) + qs = self.rel.to._default_manager.using(using).filter( + **{self.rel.field_name: value} + ) + qs = qs.complex_filter(self.rel.limit_choices_to) + if not qs.exists(): + raise exceptions.ValidationError( + self.error_messages['invalid'], + code='invalid', + params={'model': self.rel.to._meta.verbose_name, 'pk': value}, + ) + + def get_attname(self): + return '%s_id' % self.name + + def get_attname_column(self): + attname = self.get_attname() + column = self.db_column or attname + return attname, column + + def get_validator_unique_lookup_type(self): + return '%s__%s__exact' % (self.name, self.related_field.name) + + def get_default(self): + "Here we check if the default value is an object and return the to_field if so." 
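+        # e.g. (hypothetical) ForeignKey(Poll, default=get_sentinel_poll)
+        # may hand us a Poll instance; we store its to_field value
+        # (usually the pk) rather than the instance itself.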
+        field_default = super(ForeignKey, self).get_default()
+        if isinstance(field_default, self.rel.to):
+            return getattr(field_default, self.related_field.attname)
+        return field_default
+
+    def get_db_prep_save(self, value, connection):
+        if value == '' or value is None:
+            return None
+        else:
+            return self.related_field.get_db_prep_save(value,
+                connection=connection)
+
+    def value_to_string(self, obj):
+        if not obj:
+            # In required many-to-one fields with only one available choice,
+            # select that one available choice. Note: For SelectFields
+            # we have to check that the length of choices is *2*, not 1,
+            # because SelectFields always have an initial "blank" value.
+            if not self.blank and self.choices:
+                choice_list = self.get_choices_default()
+                if len(choice_list) == 2:
+                    return smart_text(choice_list[1][0])
+        return super(ForeignKey, self).value_to_string(obj)
+
+    def contribute_to_related_class(self, cls, related):
+        super(ForeignKey, self).contribute_to_related_class(cls, related)
+        if self.rel.field_name is None:
+            self.rel.field_name = cls._meta.pk.name
+
+    def formfield(self, **kwargs):
+        db = kwargs.pop('using', None)
+        if isinstance(self.rel.to, six.string_types):
+            raise ValueError("Cannot create form field for %r yet, because "
+                             "its related model %r has not been loaded yet" %
+                             (self.name, self.rel.to))
+        defaults = {
+            'form_class': forms.ModelChoiceField,
+            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
+            'to_field_name': self.rel.field_name,
+        }
+        defaults.update(kwargs)
+        return super(ForeignKey, self).formfield(**defaults)
+
+    def db_type(self, connection):
+        # The database column type of a ForeignKey is the column type
+        # of the field to which it points. An exception is if the ForeignKey
+        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
+        # in which case the column type is simply that of an IntegerField.
+        # If the database needs similar types for key fields however, the only
+        # thing we can do is make AutoField an IntegerField.
+        rel_field = self.related_field
+        if (isinstance(rel_field, AutoField) or
+                (not connection.features.related_fields_match_type and
+                isinstance(rel_field, (PositiveIntegerField,
+                                       PositiveSmallIntegerField)))):
+            return IntegerField().db_type(connection=connection)
+        return rel_field.db_type(connection=connection)
+
+
+class OneToOneField(ForeignKey):
+    """
+    A OneToOneField is essentially the same as a ForeignKey, with the exception
+    that it always carries a "unique" constraint with it and the reverse
+    relation always returns the object pointed to (since there will only ever
+    be one), rather than returning a list.
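+
+    For example (hypothetical models)::
+
+        class Place(models.Model):
+            pass
+
+        class Restaurant(models.Model):
+            place = models.OneToOneField(Place)
+
+    Here ``place.restaurant`` returns a Restaurant instance directly,
+    where a ForeignKey would instead expose a ``restaurant_set`` manager.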
+ """ + description = _("One-to-one relationship") + + def __init__(self, to, to_field=None, **kwargs): + kwargs['unique'] = True + super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs) + + def contribute_to_related_class(self, cls, related): + setattr(cls, related.get_accessor_name(), + SingleRelatedObjectDescriptor(related)) + + def formfield(self, **kwargs): + if self.rel.parent_link: + return None + return super(OneToOneField, self).formfield(**kwargs) + + def save_form_data(self, instance, data): + if isinstance(data, self.rel.to): + setattr(instance, self.name, data) + else: + setattr(instance, self.attname, data) + + +def create_many_to_many_intermediary_model(field, klass): + from django.db import models + managed = True + if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT: + to_model = field.rel.to + to = to_model.split('.')[-1] + + def set_managed(field, model, cls): + field.rel.through._meta.managed = model._meta.managed or cls._meta.managed + add_lazy_relation(klass, field, to_model, set_managed) + elif isinstance(field.rel.to, six.string_types): + to = klass._meta.object_name + to_model = klass + managed = klass._meta.managed + else: + to = field.rel.to._meta.object_name + to_model = field.rel.to + managed = klass._meta.managed or to_model._meta.managed + name = '%s_%s' % (klass._meta.object_name, field.name) + if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name: + from_ = 'from_%s' % to.lower() + to = 'to_%s' % to.lower() + else: + from_ = klass._meta.model_name + to = to.lower() + meta = type('Meta', (object,), { + 'db_table': field._get_m2m_db_table(klass._meta), + 'managed': managed, + 'auto_created': klass, + 'app_label': klass._meta.app_label, + 'db_tablespace': klass._meta.db_tablespace, + 'unique_together': (from_, to), + 'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to}, + 'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to}, + }) + # Construct and return the new class. + return type(str(name), (models.Model,), { + 'Meta': meta, + '__module__': klass.__module__, + from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint), + to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint) + }) + + +class ManyToManyField(RelatedField): + description = _("Many-to-many relationship") + + def __init__(self, to, db_constraint=True, **kwargs): + try: + assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name) + except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT + assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) + # Python 2.6 and earlier require dictionary keys to be of str type, + # not unicode and class names must be ASCII (in Python 2.x), so we + # forcibly coerce it here (breaks early if there's a problem). 
+        to = str(to)
+
+        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
+        kwargs['rel'] = ManyToManyRel(to,
+            related_name=kwargs.pop('related_name', None),
+            related_query_name=kwargs.pop('related_query_name', None),
+            limit_choices_to=kwargs.pop('limit_choices_to', None),
+            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
+            through=kwargs.pop('through', None),
+            db_constraint=db_constraint,
+        )
+
+        self.db_table = kwargs.pop('db_table', None)
+        if kwargs['rel'].through is not None:
+            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
+
+        super(ManyToManyField, self).__init__(**kwargs)
+
+    def _get_path_info(self, direct=False):
+        """
+        Called by both direct and indirect m2m traversal.
+        """
+        pathinfos = []
+        int_model = self.rel.through
+        linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
+        linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
+        if direct:
+            join1infos = linkfield1.get_reverse_path_info()
+            join2infos = linkfield2.get_path_info()
+        else:
+            join1infos = linkfield2.get_reverse_path_info()
+            join2infos = linkfield1.get_path_info()
+        pathinfos.extend(join1infos)
+        pathinfos.extend(join2infos)
+        return pathinfos
+
+    def get_path_info(self):
+        return self._get_path_info(direct=True)
+
+    def get_reverse_path_info(self):
+        return self._get_path_info(direct=False)
+
+    def get_choices_default(self):
+        return Field.get_choices(self, include_blank=False)
+
+    def _get_m2m_db_table(self, opts):
+        "Function that can be curried to provide the m2m table name for this relation"
+        if self.rel.through is not None:
+            return self.rel.through._meta.db_table
+        elif self.db_table:
+            return self.db_table
+        else:
+            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
+                                      connection.ops.max_name_length())
+
+    def _get_m2m_attr(self, related, attr):
+        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
+        cache_attr = '_m2m_%s_cache' % attr
+        if hasattr(self, cache_attr):
+            return getattr(self, cache_attr)
+        for f in self.rel.through._meta.fields:
+            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
+                setattr(self, cache_attr, getattr(f, attr))
+                return getattr(self, cache_attr)
+
+    def _get_m2m_reverse_attr(self, related, attr):
+        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
+        cache_attr = '_m2m_reverse_%s_cache' % attr
+        if hasattr(self, cache_attr):
+            return getattr(self, cache_attr)
+        found = False
+        for f in self.rel.through._meta.fields:
+            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
+                if related.model == related.parent_model:
+                    # If this is an m2m-intermediate to self,
+                    # the first foreign key you find will be
+                    # the source column. Keep searching for
+                    # the second foreign key.
+                    if found:
+                        setattr(self, cache_attr, getattr(f, attr))
+                        break
+                    else:
+                        found = True
+                else:
+                    setattr(self, cache_attr, getattr(f, attr))
+                    break
+        return getattr(self, cache_attr)
+
+    def value_to_string(self, obj):
+        data = ''
+        if obj:
+            qs = getattr(obj, self.name).all()
+            data = [instance._get_pk_val() for instance in qs]
+        else:
+            # In required many-to-many fields with only one available choice,
+            # select that one available choice.
+ if not self.blank: + choices_list = self.get_choices_default() + if len(choices_list) == 1: + data = [choices_list[0][0]] + return smart_text(data) + + def contribute_to_class(self, cls, name): + # To support multiple relations to self, it's useful to have a non-None + # related name on symmetrical relations for internal reasons. The + # concept doesn't make a lot of sense externally ("you want me to + # specify *what* on my non-reversible relation?!"), so we set it up + # automatically. The funky name reduces the chance of an accidental + # clash. + if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name): + self.rel.related_name = "%s_rel_+" % name + + super(ManyToManyField, self).contribute_to_class(cls, name) + + # The intermediate m2m model is not auto created if: + # 1) There is a manually specified intermediate, or + # 2) The class owning the m2m field is abstract. + # 3) The class owning the m2m field has been swapped out. + if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped: + self.rel.through = create_many_to_many_intermediary_model(self, cls) + + # Add the descriptor for the m2m relation + setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self)) + + # Set up the accessor for the m2m table name for the relation + self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta) + + # Populate some necessary rel arguments so that cross-app relations + # work correctly. + if isinstance(self.rel.through, six.string_types): + def resolve_through_model(field, model, cls): + field.rel.through = model + add_lazy_relation(cls, self, self.rel.through, resolve_through_model) + + def contribute_to_related_class(self, cls, related): + # Internal M2Ms (i.e., those with a related name ending with '+') + # and swapped models don't get a related descriptor. + if not self.rel.is_hidden() and not related.model._meta.swapped: + setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related)) + + # Set up the accessors for the column names on the m2m table + self.m2m_column_name = curry(self._get_m2m_attr, related, 'column') + self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column') + + self.m2m_field_name = curry(self._get_m2m_attr, related, 'name') + self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name') + + get_m2m_rel = curry(self._get_m2m_attr, related, 'rel') + self.m2m_target_field_name = lambda: get_m2m_rel().field_name + get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel') + self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name + + def set_attributes_from_rel(self): + pass + + def value_from_object(self, obj): + "Returns the value of this field in the given model instance." + return getattr(obj, self.attname).all() + + def save_form_data(self, instance, data): + setattr(instance, self.attname, data) + + def formfield(self, **kwargs): + db = kwargs.pop('using', None) + defaults = { + 'form_class': forms.ModelMultipleChoiceField, + 'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to) + } + defaults.update(kwargs) + # If initial is passed in, it's a list of related objects, but the + # MultipleChoiceField takes a list of IDs. 
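+        # (Illustrative example: initial=[topping1, topping2], or a callable
+        # returning such a list, is converted to [topping1.pk, topping2.pk]
+        # below; the names are hypothetical.)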
+ if defaults.get('initial') is not None: + initial = defaults['initial'] + if callable(initial): + initial = initial() + defaults['initial'] = [i._get_pk_val() for i in initial] + return super(ManyToManyField, self).formfield(**defaults) diff --git a/lib/python2.7/site-packages/django/db/models/fields/subclassing.py b/lib/python2.7/site-packages/django/db/models/fields/subclassing.py new file mode 100644 index 0000000..e6153ae --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/fields/subclassing.py @@ -0,0 +1,53 @@ +""" +Convenience routines for creating non-trivial Field subclasses, as well as +backwards compatibility utilities. + +Add SubfieldBase as the metaclass for your Field subclass, implement +to_python() and the other necessary methods and everything will work +seamlessly. +""" + +class SubfieldBase(type): + """ + A metaclass for custom Field subclasses. This ensures the model's attribute + has the descriptor protocol attached to it. + """ + def __new__(cls, name, bases, attrs): + new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs) + new_class.contribute_to_class = make_contrib( + new_class, attrs.get('contribute_to_class') + ) + return new_class + +class Creator(object): + """ + A placeholder class that provides a way to set the attribute on the model. + """ + def __init__(self, field): + self.field = field + + def __get__(self, obj, type=None): + if obj is None: + raise AttributeError('Can only be accessed via an instance.') + return obj.__dict__[self.field.name] + + def __set__(self, obj, value): + obj.__dict__[self.field.name] = self.field.to_python(value) + +def make_contrib(superclass, func=None): + """ + Returns a suitable contribute_to_class() method for the Field subclass. + + If 'func' is passed in, it is the existing contribute_to_class() method on + the subclass and it is called before anything else. It is assumed in this + case that the existing contribute_to_class() calls all the necessary + superclass methods. + """ + def contribute_to_class(self, cls, name): + if func: + func(self, cls, name) + else: + super(superclass, self).contribute_to_class(cls, name) + setattr(cls, self.name, Creator(self)) + + return contribute_to_class diff --git a/lib/python2.7/site-packages/django/db/models/loading.py b/lib/python2.7/site-packages/django/db/models/loading.py new file mode 100644 index 0000000..bb87728 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/loading.py @@ -0,0 +1,327 @@ +"Utilities for loading models and the modules that contain them." + +from django.conf import settings +from django.core.exceptions import ImproperlyConfigured +from django.utils.datastructures import SortedDict +from django.utils.importlib import import_module +from django.utils.module_loading import module_has_submodule +from django.utils._os import upath +from django.utils import six + +import imp +import sys +import os + +__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models', + 'load_app', 'app_cache_ready') + +class UnavailableApp(Exception): + pass + +class AppCache(object): + """ + A cache that stores installed applications and their models. Used to + provide reverse-relations and for app introspection (e.g. admin). + """ + # Use the Borg pattern to share state between all instances. Details at + # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531. + __shared_state = dict( + # Keys of app_store are the model modules for each application. 
+ app_store=SortedDict(), + + # Mapping of installed app_labels to model modules for that app. + app_labels={}, + + # Mapping of app_labels to a dictionary of model names to model code. + # May contain apps that are not installed. + app_models=SortedDict(), + + # Mapping of app_labels to errors raised when trying to import the app. + app_errors={}, + + # -- Everything below here is only used when populating the cache -- + loaded=False, + handled=set(), + postponed=[], + nesting_level=0, + _get_models_cache={}, + available_apps=None, + ) + + def __init__(self): + self.__dict__ = self.__shared_state + + def _populate(self): + """ + Fill in all the cache information. This method is threadsafe, in the + sense that every caller will see the same state upon return, and if the + cache is already initialised, it does no work. + """ + if self.loaded: + return + # Note that we want to use the import lock here - the app loading is + # in many cases initiated implicitly by importing, and thus it is + # possible to end up in deadlock when one thread initiates loading + # without holding the importer lock and another thread then tries to + # import something which also launches the app loading. For details of + # this situation see #18251. + imp.acquire_lock() + try: + if self.loaded: + return + for app_name in settings.INSTALLED_APPS: + if app_name in self.handled: + continue + self.load_app(app_name, True) + if not self.nesting_level: + for app_name in self.postponed: + self.load_app(app_name) + self.loaded = True + finally: + imp.release_lock() + + def _label_for(self, app_mod): + """ + Return app_label for given models module. + + """ + return app_mod.__name__.split('.')[-2] + + def load_app(self, app_name, can_postpone=False): + """ + Loads the app with the provided fully qualified name, and returns the + model module. + """ + self.handled.add(app_name) + self.nesting_level += 1 + app_module = import_module(app_name) + try: + models = import_module('%s.models' % app_name) + except ImportError: + self.nesting_level -= 1 + # If the app doesn't have a models module, we can just ignore the + # ImportError and return no models for it. + if not module_has_submodule(app_module, 'models'): + return None + # But if the app does have a models module, we need to figure out + # whether to suppress or propagate the error. If can_postpone is + # True then it may be that the package is still being imported by + # Python and the models module isn't available yet. So we add the + # app to the postponed list and we'll try it again after all the + # recursion has finished (in populate). If can_postpone is False + # then it's time to raise the ImportError. + else: + if can_postpone: + self.postponed.append(app_name) + return None + else: + raise + + self.nesting_level -= 1 + if models not in self.app_store: + self.app_store[models] = len(self.app_store) + self.app_labels[self._label_for(models)] = models + return models + + def app_cache_ready(self): + """ + Returns true if the model cache is fully populated. + + Useful for code that wants to cache the results of get_models() for + themselves once it is safe to do so. + """ + return self.loaded + + def get_apps(self): + """ + Returns a list of all installed modules that contain models. + """ + self._populate() + + apps = self.app_store.items() + if self.available_apps is not None: + apps = [elt for elt in apps + if self._label_for(elt[0]) in self.available_apps] + + # Ensure the returned list is always in the same order (with new apps + # added at the end). 
This avoids unstable ordering on the admin app
+        # list page, for example.
+        apps = sorted(apps, key=lambda elt: elt[1])
+
+        return [elt[0] for elt in apps]
+
+    def get_app_paths(self):
+        """
+        Returns a list of paths to all installed apps.
+
+        Useful for discovering files at conventional locations inside apps
+        (static files, templates, etc.)
+        """
+        self._populate()
+
+        app_paths = []
+        for app in self.get_apps():
+            if hasattr(app, '__path__'):        # models/__init__.py package
+                app_paths.extend([upath(path) for path in app.__path__])
+            else:                               # models.py module
+                app_paths.append(upath(app.__file__))
+        return app_paths
+
+    def get_app(self, app_label, emptyOK=False):
+        """
+        Returns the module containing the models for the given app_label.
+
+        Returns None if the app has no models in it and emptyOK is True.
+
+        Raises UnavailableApp when set_available_apps() is in effect and
+        doesn't include app_label.
+        """
+        self._populate()
+        imp.acquire_lock()
+        try:
+            for app_name in settings.INSTALLED_APPS:
+                if app_label == app_name.split('.')[-1]:
+                    mod = self.load_app(app_name, False)
+                    if mod is None and not emptyOK:
+                        raise ImproperlyConfigured("App with label %s is missing a models.py module." % app_label)
+                    if self.available_apps is not None and app_label not in self.available_apps:
+                        raise UnavailableApp("App with label %s isn't available." % app_label)
+                    return mod
+            raise ImproperlyConfigured("App with label %s could not be found" % app_label)
+        finally:
+            imp.release_lock()
+
+    def get_app_errors(self):
+        "Returns the map of known problems with the INSTALLED_APPS."
+        self._populate()
+        return self.app_errors
+
+    def get_models(self, app_mod=None,
+                   include_auto_created=False, include_deferred=False,
+                   only_installed=True, include_swapped=False):
+        """
+        Given a module containing models, returns a list of the models.
+        Otherwise returns a list of all installed models.
+
+        By default, auto-created models (i.e., m2m models without an
+        explicit intermediate table) are not included. However, if you
+        specify include_auto_created=True, they will be.
+
+        By default, models created to satisfy deferred attribute
+        queries are *not* included in the list of models. However, if
+        you specify include_deferred, they will be.
+
+        By default, models that aren't part of installed apps will *not*
+        be included in the list of models. However, if you specify
+        only_installed=False, they will be.
+
+        By default, models that have been swapped out will *not* be
+        included in the list of models. However, if you specify
+        include_swapped, they will be.
+ """ + cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped) + model_list = None + try: + model_list = self._get_models_cache[cache_key] + if self.available_apps is not None and only_installed: + model_list = [m for m in model_list + if m._meta.app_label in self.available_apps] + return model_list + except KeyError: + pass + self._populate() + if app_mod: + if app_mod in self.app_store: + app_list = [self.app_models.get(self._label_for(app_mod), + SortedDict())] + else: + app_list = [] + else: + if only_installed: + app_list = [self.app_models.get(app_label, SortedDict()) + for app_label in six.iterkeys(self.app_labels)] + else: + app_list = six.itervalues(self.app_models) + model_list = [] + for app in app_list: + model_list.extend( + model for model in app.values() + if ((not model._deferred or include_deferred) and + (not model._meta.auto_created or include_auto_created) and + (not model._meta.swapped or include_swapped)) + ) + self._get_models_cache[cache_key] = model_list + if self.available_apps is not None and only_installed: + model_list = [m for m in model_list + if m._meta.app_label in self.available_apps] + return model_list + + def get_model(self, app_label, model_name, + seed_cache=True, only_installed=True): + """ + Returns the model matching the given app_label and case-insensitive + model_name. + + Returns None if no model is found. + + Raises UnavailableApp when set_available_apps() in in effect and + doesn't include app_label. + """ + if seed_cache: + self._populate() + if only_installed and app_label not in self.app_labels: + return None + if (self.available_apps is not None and only_installed + and app_label not in self.available_apps): + raise UnavailableApp("App with label %s isn't available." % app_label) + try: + return self.app_models[app_label][model_name.lower()] + except KeyError: + return None + + def register_models(self, app_label, *models): + """ + Register a set of models as belonging to an app. + """ + for model in models: + # Store as 'name: model' pair in a dictionary + # in the app_models dictionary + model_name = model._meta.model_name + model_dict = self.app_models.setdefault(app_label, SortedDict()) + if model_name in model_dict: + # The same model may be imported via different paths (e.g. + # appname.models and project.appname.models). We use the source + # filename as a means to detect identity. + fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__)) + fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__)) + # Since the filename extension could be .py the first time and + # .pyc or .pyo the second time, ignore the extension when + # comparing. + if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]: + continue + model_dict[model_name] = model + self._get_models_cache.clear() + + def set_available_apps(self, available): + if not set(available).issubset(set(settings.INSTALLED_APPS)): + extra = set(available) - set(settings.INSTALLED_APPS) + raise ValueError("Available apps isn't a subset of installed " + "apps, extra apps: " + ", ".join(extra)) + self.available_apps = set(app.rsplit('.', 1)[-1] for app in available) + + def unset_available_apps(self): + self.available_apps = None + +cache = AppCache() + +# These methods were always module level, so are kept that way for backwards +# compatibility. 
+get_apps = cache.get_apps
+get_app_paths = cache.get_app_paths
+get_app = cache.get_app
+get_app_errors = cache.get_app_errors
+get_models = cache.get_models
+get_model = cache.get_model
+register_models = cache.register_models
+load_app = cache.load_app
+app_cache_ready = cache.app_cache_ready
diff --git a/lib/python2.7/site-packages/django/db/models/manager.py b/lib/python2.7/site-packages/django/db/models/manager.py
new file mode 100644
index 0000000..a1aa79f
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/manager.py
@@ -0,0 +1,283 @@
+import copy
+from django.db import router
+from django.db.models.query import QuerySet, insert_query, RawQuerySet
+from django.db.models import signals
+from django.db.models.fields import FieldDoesNotExist
+from django.utils import six
+from django.utils.deprecation import RenameMethodsBase
+
+def ensure_default_manager(sender, **kwargs):
+    """
+    Ensures that a Model subclass contains a default manager and sets the
+    _default_manager attribute on the class. Also sets up _base_manager to
+    point to a plain Manager instance (which could be the same as
+    _default_manager if it's not a subclass of Manager).
+    """
+    cls = sender
+    if cls._meta.abstract:
+        setattr(cls, 'objects', AbstractManagerDescriptor(cls))
+        return
+    elif cls._meta.swapped:
+        setattr(cls, 'objects', SwappedManagerDescriptor(cls))
+        return
+    if not getattr(cls, '_default_manager', None):
+        # Create the default manager, if needed.
+        try:
+            cls._meta.get_field('objects')
+            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
+        except FieldDoesNotExist:
+            pass
+        cls.add_to_class('objects', Manager())
+        cls._base_manager = cls.objects
+    elif not getattr(cls, '_base_manager', None):
+        default_mgr = cls._default_manager.__class__
+        if (default_mgr is Manager or
+                getattr(default_mgr, "use_for_related_fields", False)):
+            cls._base_manager = cls._default_manager
+        else:
+            # Default manager isn't a plain Manager class, or a suitable
+            # replacement, so we walk up the base class hierarchy until we hit
+            # something appropriate.
+            for base_class in default_mgr.mro()[1:]:
+                if (base_class is Manager or
+                        getattr(base_class, "use_for_related_fields", False)):
+                    cls.add_to_class('_base_manager', base_class())
+                    return
+            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
+
+signals.class_prepared.connect(ensure_default_manager)
+
+
+class RenameManagerMethods(RenameMethodsBase):
+    renamed_methods = (
+        ('get_query_set', 'get_queryset', PendingDeprecationWarning),
+        ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
+    )
+
+
+class Manager(six.with_metaclass(RenameManagerMethods)):
+    # Tracks each time a Manager instance is created. Used to retain order.
+    creation_counter = 0
+
+    def __init__(self):
+        super(Manager, self).__init__()
+        self._set_creation_counter()
+        self.model = None
+        self._inherited = False
+        self._db = None
+
+    def contribute_to_class(self, model, name):
+        # TODO: Use weakref because of possible memory leak / circular reference.
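+        # (ModelBase.add_to_class() invokes this hook while the model class is
+        # being built; e.g. a plain ``objects = Manager()`` attribute reaches
+        # here with name='objects'.)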
+ self.model = model + # Only contribute the manager if the model is concrete + if model._meta.abstract: + setattr(model, name, AbstractManagerDescriptor(model)) + elif model._meta.swapped: + setattr(model, name, SwappedManagerDescriptor(model)) + else: + # if not model._meta.abstract and not model._meta.swapped: + setattr(model, name, ManagerDescriptor(self)) + if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter: + model._default_manager = self + if model._meta.abstract or (self._inherited and not self.model._meta.proxy): + model._meta.abstract_managers.append((self.creation_counter, name, + self)) + else: + model._meta.concrete_managers.append((self.creation_counter, name, + self)) + + def _set_creation_counter(self): + """ + Sets the creation counter value for this instance and increments the + class-level copy. + """ + self.creation_counter = Manager.creation_counter + Manager.creation_counter += 1 + + def _copy_to_model(self, model): + """ + Makes a copy of the manager and assigns it to 'model', which should be + a child of the existing model (used when inheriting a manager from an + abstract base class). + """ + assert issubclass(model, self.model) + mgr = copy.copy(self) + mgr._set_creation_counter() + mgr.model = model + mgr._inherited = True + return mgr + + def db_manager(self, using): + obj = copy.copy(self) + obj._db = using + return obj + + @property + def db(self): + return self._db or router.db_for_read(self.model) + + ####################### + # PROXIES TO QUERYSET # + ####################### + + def get_queryset(self): + """Returns a new QuerySet object. Subclasses can override this method + to easily customize the behavior of the Manager. + """ + return QuerySet(self.model, using=self._db) + + def none(self): + return self.get_queryset().none() + + def all(self): + return self.get_queryset() + + def count(self): + return self.get_queryset().count() + + def dates(self, *args, **kwargs): + return self.get_queryset().dates(*args, **kwargs) + + def datetimes(self, *args, **kwargs): + return self.get_queryset().datetimes(*args, **kwargs) + + def distinct(self, *args, **kwargs): + return self.get_queryset().distinct(*args, **kwargs) + + def extra(self, *args, **kwargs): + return self.get_queryset().extra(*args, **kwargs) + + def get(self, *args, **kwargs): + return self.get_queryset().get(*args, **kwargs) + + def get_or_create(self, **kwargs): + return self.get_queryset().get_or_create(**kwargs) + + def create(self, **kwargs): + return self.get_queryset().create(**kwargs) + + def bulk_create(self, *args, **kwargs): + return self.get_queryset().bulk_create(*args, **kwargs) + + def filter(self, *args, **kwargs): + return self.get_queryset().filter(*args, **kwargs) + + def aggregate(self, *args, **kwargs): + return self.get_queryset().aggregate(*args, **kwargs) + + def annotate(self, *args, **kwargs): + return self.get_queryset().annotate(*args, **kwargs) + + def complex_filter(self, *args, **kwargs): + return self.get_queryset().complex_filter(*args, **kwargs) + + def exclude(self, *args, **kwargs): + return self.get_queryset().exclude(*args, **kwargs) + + def in_bulk(self, *args, **kwargs): + return self.get_queryset().in_bulk(*args, **kwargs) + + def iterator(self, *args, **kwargs): + return self.get_queryset().iterator(*args, **kwargs) + + def earliest(self, *args, **kwargs): + return self.get_queryset().earliest(*args, **kwargs) + + def latest(self, *args, **kwargs): + return self.get_queryset().latest(*args, 
**kwargs) + + def first(self): + return self.get_queryset().first() + + def last(self): + return self.get_queryset().last() + + def order_by(self, *args, **kwargs): + return self.get_queryset().order_by(*args, **kwargs) + + def select_for_update(self, *args, **kwargs): + return self.get_queryset().select_for_update(*args, **kwargs) + + def select_related(self, *args, **kwargs): + return self.get_queryset().select_related(*args, **kwargs) + + def prefetch_related(self, *args, **kwargs): + return self.get_queryset().prefetch_related(*args, **kwargs) + + def values(self, *args, **kwargs): + return self.get_queryset().values(*args, **kwargs) + + def values_list(self, *args, **kwargs): + return self.get_queryset().values_list(*args, **kwargs) + + def update(self, *args, **kwargs): + return self.get_queryset().update(*args, **kwargs) + + def reverse(self, *args, **kwargs): + return self.get_queryset().reverse(*args, **kwargs) + + def defer(self, *args, **kwargs): + return self.get_queryset().defer(*args, **kwargs) + + def only(self, *args, **kwargs): + return self.get_queryset().only(*args, **kwargs) + + def using(self, *args, **kwargs): + return self.get_queryset().using(*args, **kwargs) + + def exists(self, *args, **kwargs): + return self.get_queryset().exists(*args, **kwargs) + + def _insert(self, objs, fields, **kwargs): + return insert_query(self.model, objs, fields, **kwargs) + + def _update(self, values, **kwargs): + return self.get_queryset()._update(values, **kwargs) + + def raw(self, raw_query, params=None, *args, **kwargs): + return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs) + + +class ManagerDescriptor(object): + # This class ensures managers aren't accessible via model instances. + # For example, Poll.objects works, but poll_obj.objects raises AttributeError. + def __init__(self, manager): + self.manager = manager + + def __get__(self, instance, type=None): + if instance != None: + raise AttributeError("Manager isn't accessible via %s instances" % type.__name__) + return self.manager + + +class AbstractManagerDescriptor(object): + # This class provides a better error message when you try to access a + # manager on an abstract model. + def __init__(self, model): + self.model = model + + def __get__(self, instance, type=None): + raise AttributeError("Manager isn't available; %s is abstract" % ( + self.model._meta.object_name, + )) + + +class SwappedManagerDescriptor(object): + # This class provides a better error message when you try to access a + # manager on a swapped model. 
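+    # (For example, once a swappable model such as the auth user has been
+    # replaced via its setting, accessing the original class's manager raises
+    # the AttributeError below naming the replacement.)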
+ def __init__(self, model): + self.model = model + + def __get__(self, instance, type=None): + raise AttributeError("Manager isn't available; %s has been swapped for '%s'" % ( + self.model._meta.object_name, self.model._meta.swapped + )) + + +class EmptyManager(Manager): + def __init__(self, model): + super(EmptyManager, self).__init__() + self.model = model + + def get_queryset(self): + return super(EmptyManager, self).get_queryset().none() diff --git a/lib/python2.7/site-packages/django/db/models/options.py b/lib/python2.7/site-packages/django/db/models/options.py new file mode 100644 index 0000000..6ccc67d --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/options.py @@ -0,0 +1,589 @@ +from __future__ import unicode_literals + +import re +from bisect import bisect +import warnings + +from django.conf import settings +from django.db.models.fields.related import ManyToManyRel +from django.db.models.fields import AutoField, FieldDoesNotExist +from django.db.models.fields.proxy import OrderWrt +from django.db.models.loading import get_models, app_cache_ready +from django.utils import six +from django.utils.functional import cached_property +from django.utils.datastructures import SortedDict +from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible +from django.utils.translation import activate, deactivate_all, get_language, string_concat + +# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces". +get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip() + +DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering', + 'unique_together', 'permissions', 'get_latest_by', + 'order_with_respect_to', 'app_label', 'db_tablespace', + 'abstract', 'managed', 'proxy', 'swappable', 'auto_created', + 'index_together', 'select_on_save') + + +@python_2_unicode_compatible +class Options(object): + def __init__(self, meta, app_label=None): + self.local_fields, self.local_many_to_many = [], [] + self.virtual_fields = [] + self.model_name, self.verbose_name = None, None + self.verbose_name_plural = None + self.db_table = '' + self.ordering = [] + self.unique_together = [] + self.index_together = [] + self.select_on_save = False + self.permissions = [] + self.object_name, self.app_label = None, app_label + self.get_latest_by = None + self.order_with_respect_to = None + self.db_tablespace = settings.DEFAULT_TABLESPACE + self.meta = meta + self.pk = None + self.has_auto_field, self.auto_field = False, None + self.abstract = False + self.managed = True + self.proxy = False + # For any class that is a proxy (including automatically created + # classes for deferred object loading), proxy_for_model tells us + # which class this model is proxying. Note that proxy_for_model + # can create a chain of proxy models. For non-proxy models, the + # variable is always None. + self.proxy_for_model = None + # For any non-abstract class, the concrete class is the model + # in the end of the proxy_for_model chain. In particular, for + # concrete models, the concrete_model is always the class itself. + self.concrete_model = None + self.swappable = None + self.parents = SortedDict() + self.auto_created = False + + # To handle various inheritance situations, we need to track where + # managers came from (concrete or abstract base classes). 
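+        # (Managers inherited from an abstract base are re-attached to each
+        # concrete child via Manager._copy_to_model(); see manager.py.)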
+ self.abstract_managers = [] + self.concrete_managers = [] + + # List of all lookups defined in ForeignKey 'limit_choices_to' options + # from *other* models. Needed for some admin checks. Internal use only. + self.related_fkey_lookups = [] + + def contribute_to_class(self, cls, name): + from django.db import connection + from django.db.backends.util import truncate_name + + cls._meta = self + self.model = cls + self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS + # First, construct the default values for these options. + self.object_name = cls.__name__ + self.model_name = self.object_name.lower() + self.verbose_name = get_verbose_name(self.object_name) + + # Next, apply any overridden values from 'class Meta'. + if self.meta: + meta_attrs = self.meta.__dict__.copy() + for name in self.meta.__dict__: + # Ignore any private attributes that Django doesn't care about. + # NOTE: We can't modify a dictionary's contents while looping + # over it, so we loop over the *original* dictionary instead. + if name.startswith('_'): + del meta_attrs[name] + for attr_name in DEFAULT_NAMES: + if attr_name in meta_attrs: + setattr(self, attr_name, meta_attrs.pop(attr_name)) + elif hasattr(self.meta, attr_name): + setattr(self, attr_name, getattr(self.meta, attr_name)) + + # unique_together can be either a tuple of tuples, or a single + # tuple of two strings. Normalize it to a tuple of tuples, so that + # calling code can uniformly expect that. + ut = meta_attrs.pop('unique_together', self.unique_together) + if ut and not isinstance(ut[0], (tuple, list)): + ut = (ut,) + self.unique_together = ut + + # verbose_name_plural is a special case because it uses a 's' + # by default. + if self.verbose_name_plural is None: + self.verbose_name_plural = string_concat(self.verbose_name, 's') + + # Any leftover attributes must be invalid. + if meta_attrs != {}: + raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys())) + else: + self.verbose_name_plural = string_concat(self.verbose_name, 's') + del self.meta + + # If the db_table wasn't provided, use the app_label + model_name. + if not self.db_table: + self.db_table = "%s_%s" % (self.app_label, self.model_name) + self.db_table = truncate_name(self.db_table, connection.ops.max_name_length()) + + @property + def module_name(self): + """ + This property has been deprecated in favor of `model_name`. refs #19689 + """ + warnings.warn( + "Options.module_name has been deprecated in favor of model_name", + PendingDeprecationWarning, stacklevel=2) + return self.model_name + + def _prepare(self, model): + if self.order_with_respect_to: + self.order_with_respect_to = self.get_field(self.order_with_respect_to) + self.ordering = ('_order',) + model.add_to_class('_order', OrderWrt()) + else: + self.order_with_respect_to = None + + if self.pk is None: + if self.parents: + # Promote the first parent link in lieu of adding yet another + # field. + field = next(six.itervalues(self.parents)) + # Look for a local field with the same name as the + # first parent link. 
If a local field has already been
+                # created, use it instead of promoting the parent
+                already_created = [fld for fld in self.local_fields if fld.name == field.name]
+                if already_created:
+                    field = already_created[0]
+                field.primary_key = True
+                self.setup_pk(field)
+            else:
+                auto = AutoField(verbose_name='ID', primary_key=True,
+                        auto_created=True)
+                model.add_to_class('id', auto)
+
+    def add_field(self, field):
+        # Insert the given field in the order in which it was created, using
+        # the "creation_counter" attribute of the field.
+        # Move many-to-many related fields from self.fields into
+        # self.many_to_many.
+        if field.rel and isinstance(field.rel, ManyToManyRel):
+            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
+            if hasattr(self, '_m2m_cache'):
+                del self._m2m_cache
+        else:
+            self.local_fields.insert(bisect(self.local_fields, field), field)
+            self.setup_pk(field)
+            if hasattr(self, '_field_cache'):
+                del self._field_cache
+                del self._field_name_cache
+            # The fields, concrete_fields and local_concrete_fields are
+            # implemented as cached properties for performance reasons.
+            # The attrs will not exist if the cached property isn't
+            # accessed yet, hence the try-excepts.
+            try:
+                del self.fields
+            except AttributeError:
+                pass
+            try:
+                del self.concrete_fields
+            except AttributeError:
+                pass
+            try:
+                del self.local_concrete_fields
+            except AttributeError:
+                pass
+
+        if hasattr(self, '_name_map'):
+            del self._name_map
+
+    def add_virtual_field(self, field):
+        self.virtual_fields.append(field)
+
+    def setup_pk(self, field):
+        if not self.pk and field.primary_key:
+            self.pk = field
+            field.serialize = False
+
+    def pk_index(self):
+        """
+        Returns the index of the primary key field in the self.concrete_fields
+        list.
+        """
+        return self.concrete_fields.index(self.pk)
+
+    def setup_proxy(self, target):
+        """
+        Does the internal setup so that the current model is a proxy for
+        "target".
+        """
+        self.pk = target._meta.pk
+        self.proxy_for_model = target
+        self.db_table = target._meta.db_table
+
+    def __repr__(self):
+        return '<Options for %s>' % self.object_name
+
+    def __str__(self):
+        return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
+
+    def verbose_name_raw(self):
+        """
+        There are a few places where the untranslated verbose name is needed
+        (so that we get the same value regardless of currently active
+        locale).
+        """
+        lang = get_language()
+        deactivate_all()
+        raw = force_text(self.verbose_name)
+        activate(lang)
+        return raw
+    verbose_name_raw = property(verbose_name_raw)
+
+    def _swapped(self):
+        """
+        Has this model been swapped out for another? If so, return the model
+        name of the replacement; otherwise, return None.
+
+        For historical reasons, model name lookups using get_model() are
+        case insensitive, so we make sure we are case insensitive here.
+        """
+        if self.swappable:
+            model_label = '%s.%s' % (self.app_label, self.model_name)
+            swapped_for = getattr(settings, self.swappable, None)
+            if swapped_for:
+                try:
+                    swapped_label, swapped_object = swapped_for.split('.')
+                except ValueError:
+                    # setting not in the format app_label.model_name
+                    # raising ImproperlyConfigured here causes problems with
+                    # test cleanup code - instead it is raised in get_user_model
+                    # or as part of validation.
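+                    # (Illustrative example: a dotless setting value such as
+                    # AUTH_USER_MODEL = 'myusers' takes this branch and is
+                    # returned verbatim.)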
+ return swapped_for + + if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label): + return swapped_for + return None + swapped = property(_swapped) + + @cached_property + def fields(self): + """ + The getter for self.fields. This returns the list of field objects + available to this model (including through parent models). + + Callers are not permitted to modify this list, since it's a reference + to this instance (not a copy). + """ + try: + self._field_name_cache + except AttributeError: + self._fill_fields_cache() + return self._field_name_cache + + @cached_property + def concrete_fields(self): + return [f for f in self.fields if f.column is not None] + + @cached_property + def local_concrete_fields(self): + return [f for f in self.local_fields if f.column is not None] + + def get_fields_with_model(self): + """ + Returns a sequence of (field, model) pairs for all fields. The "model" + element is None for fields on the current model. Mostly of use when + constructing queries so that we know which model a field belongs to. + """ + try: + self._field_cache + except AttributeError: + self._fill_fields_cache() + return self._field_cache + + def get_concrete_fields_with_model(self): + return [(field, model) for field, model in self.get_fields_with_model() if + field.column is not None] + + def _fill_fields_cache(self): + cache = [] + for parent in self.parents: + for field, model in parent._meta.get_fields_with_model(): + if model: + cache.append((field, model)) + else: + cache.append((field, parent)) + cache.extend([(f, None) for f in self.local_fields]) + self._field_cache = tuple(cache) + self._field_name_cache = [x for x, _ in cache] + + def _many_to_many(self): + try: + self._m2m_cache + except AttributeError: + self._fill_m2m_cache() + return list(self._m2m_cache) + many_to_many = property(_many_to_many) + + def get_m2m_with_model(self): + """ + The many-to-many version of get_fields_with_model(). + """ + try: + self._m2m_cache + except AttributeError: + self._fill_m2m_cache() + return list(six.iteritems(self._m2m_cache)) + + def _fill_m2m_cache(self): + cache = SortedDict() + for parent in self.parents: + for field, model in parent._meta.get_m2m_with_model(): + if model: + cache[field] = model + else: + cache[field] = parent + for field in self.local_many_to_many: + cache[field] = None + self._m2m_cache = cache + + def get_field(self, name, many_to_many=True): + """ + Returns the requested field by name. Raises FieldDoesNotExist on error. + """ + to_search = (self.fields + self.many_to_many) if many_to_many else self.fields + for f in to_search: + if f.name == name: + return f + raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name)) + + def get_field_by_name(self, name): + """ + Returns the (field_object, model, direct, m2m), where field_object is + the Field instance for the given name, model is the model containing + this field (None for local fields), direct is True if the field exists + on this model, and m2m is True for many-to-many relations. When + 'direct' is False, 'field_object' is the corresponding RelatedObject + for this field (since the field doesn't have an instance associated + with it). + + Uses a cache internally, so after the first access, this is very fast. 
+ """ + try: + try: + return self._name_map[name] + except AttributeError: + cache = self.init_name_map() + return cache[name] + except KeyError: + raise FieldDoesNotExist('%s has no field named %r' + % (self.object_name, name)) + + def get_all_field_names(self): + """ + Returns a list of all field names that are possible for this model + (including reverse relation names). This is used for pretty printing + debugging output (a list of choices), so any internal-only field names + are not included. + """ + try: + cache = self._name_map + except AttributeError: + cache = self.init_name_map() + names = sorted(cache.keys()) + # Internal-only names end with "+" (symmetrical m2m related names being + # the main example). Trim them. + return [val for val in names if not val.endswith('+')] + + def init_name_map(self): + """ + Initialises the field name -> field object mapping. + """ + cache = {} + # We intentionally handle related m2m objects first so that symmetrical + # m2m accessor names can be overridden, if necessary. + for f, model in self.get_all_related_m2m_objects_with_model(): + cache[f.field.related_query_name()] = (f, model, False, True) + for f, model in self.get_all_related_objects_with_model(): + cache[f.field.related_query_name()] = (f, model, False, False) + for f, model in self.get_m2m_with_model(): + cache[f.name] = (f, model, True, True) + for f, model in self.get_fields_with_model(): + cache[f.name] = (f, model, True, False) + for f in self.virtual_fields: + if hasattr(f, 'related'): + cache[f.name] = (f.related, None if f.model == self.model else f.model, True, False) + if app_cache_ready(): + self._name_map = cache + return cache + + def get_add_permission(self): + """ + This method has been deprecated in favor of + `django.contrib.auth.get_permission_codename`. refs #20642 + """ + warnings.warn( + "`Options.get_add_permission` has been deprecated in favor " + "of `django.contrib.auth.get_permission_codename`.", + PendingDeprecationWarning, stacklevel=2) + return 'add_%s' % self.model_name + + def get_change_permission(self): + """ + This method has been deprecated in favor of + `django.contrib.auth.get_permission_codename`. refs #20642 + """ + warnings.warn( + "`Options.get_change_permission` has been deprecated in favor " + "of `django.contrib.auth.get_permission_codename`.", + PendingDeprecationWarning, stacklevel=2) + return 'change_%s' % self.model_name + + def get_delete_permission(self): + """ + This method has been deprecated in favor of + `django.contrib.auth.get_permission_codename`. refs #20642 + """ + warnings.warn( + "`Options.get_delete_permission` has been deprecated in favor " + "of `django.contrib.auth.get_permission_codename`.", + PendingDeprecationWarning, stacklevel=2) + return 'delete_%s' % self.model_name + + def get_all_related_objects(self, local_only=False, include_hidden=False, + include_proxy_eq=False): + return [k for k, v in self.get_all_related_objects_with_model( + local_only=local_only, include_hidden=include_hidden, + include_proxy_eq=include_proxy_eq)] + + def get_all_related_objects_with_model(self, local_only=False, + include_hidden=False, + include_proxy_eq=False): + """ + Returns a list of (related-object, model) pairs. Similar to + get_fields_with_model(). 
+ """ + try: + self._related_objects_cache + except AttributeError: + self._fill_related_objects_cache() + predicates = [] + if local_only: + predicates.append(lambda k, v: not v) + if not include_hidden: + predicates.append(lambda k, v: not k.field.rel.is_hidden()) + cache = (self._related_objects_proxy_cache if include_proxy_eq + else self._related_objects_cache) + return [t for t in cache.items() if all(p(*t) for p in predicates)] + + def _fill_related_objects_cache(self): + cache = SortedDict() + parent_list = self.get_parent_list() + for parent in self.parents: + for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True): + if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list: + continue + if not model: + cache[obj] = parent + else: + cache[obj] = model + # Collect also objects which are in relation to some proxy child/parent of self. + proxy_cache = cache.copy() + for klass in get_models(include_auto_created=True, only_installed=False): + if not klass._meta.swapped: + for f in klass._meta.local_fields: + if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation: + if self == f.rel.to._meta: + cache[f.related] = None + proxy_cache[f.related] = None + elif self.concrete_model == f.rel.to._meta.concrete_model: + proxy_cache[f.related] = None + self._related_objects_cache = cache + self._related_objects_proxy_cache = proxy_cache + + def get_all_related_many_to_many_objects(self, local_only=False): + try: + cache = self._related_many_to_many_cache + except AttributeError: + cache = self._fill_related_many_to_many_cache() + if local_only: + return [k for k, v in cache.items() if not v] + return list(cache) + + def get_all_related_m2m_objects_with_model(self): + """ + Returns a list of (related-m2m-object, model) pairs. Similar to + get_fields_with_model(). + """ + try: + cache = self._related_many_to_many_cache + except AttributeError: + cache = self._fill_related_many_to_many_cache() + return list(six.iteritems(cache)) + + def _fill_related_many_to_many_cache(self): + cache = SortedDict() + parent_list = self.get_parent_list() + for parent in self.parents: + for obj, model in parent._meta.get_all_related_m2m_objects_with_model(): + if obj.field.creation_counter < 0 and obj.model not in parent_list: + continue + if not model: + cache[obj] = parent + else: + cache[obj] = model + for klass in get_models(only_installed=False): + if not klass._meta.swapped: + for f in klass._meta.local_many_to_many: + if (f.rel + and not isinstance(f.rel.to, six.string_types) + and self == f.rel.to._meta): + cache[f.related] = None + if app_cache_ready(): + self._related_many_to_many_cache = cache + return cache + + def get_base_chain(self, model): + """ + Returns a list of parent classes leading to 'model' (order from closet + to most distant ancestor). This has to handle the case were 'model' is + a granparent or even more distant relation. + """ + if not self.parents: + return None + if model in self.parents: + return [model] + for parent in self.parents: + res = parent._meta.get_base_chain(model) + if res: + res.insert(0, parent) + return res + return None + + def get_parent_list(self): + """ + Returns a list of all the ancestor of this model as a list. Useful for + determining if something is an ancestor, regardless of lineage. 
+ """ + result = set() + for parent in self.parents: + result.add(parent) + result.update(parent._meta.get_parent_list()) + return result + + def get_ancestor_link(self, ancestor): + """ + Returns the field on the current model which points to the given + "ancestor". This is possible an indirect link (a pointer to a parent + model, which points, eventually, to the ancestor). Used when + constructing table joins for model inheritance. + + Returns None if the model isn't an ancestor of this one. + """ + if ancestor in self.parents: + return self.parents[ancestor] + for parent in self.parents: + # Tries to get a link field from the immediate parent + parent_link = parent._meta.get_ancestor_link(ancestor) + if parent_link: + # In case of a proxied model, the first link + # of the chain to the ancestor is that parent + # links + return self.parents[parent] or parent_link diff --git a/lib/python2.7/site-packages/django/db/models/query.py b/lib/python2.7/site-packages/django/db/models/query.py new file mode 100644 index 0000000..44047d4 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/query.py @@ -0,0 +1,1731 @@ +""" +The main QuerySet implementation. This provides the public API for the ORM. +""" + +import copy +import itertools +import sys +import warnings + +from django.conf import settings +from django.core import exceptions +from django.db import connections, router, transaction, DatabaseError +from django.db.models.constants import LOOKUP_SEP +from django.db.models.fields import AutoField +from django.db.models.query_utils import (Q, select_related_descend, + deferred_class_factory, InvalidQuery) +from django.db.models.deletion import Collector +from django.db.models import sql +from django.utils.functional import partition +from django.utils import six +from django.utils import timezone + +# The maximum number of items to display in a QuerySet.__repr__ +REPR_OUTPUT_SIZE = 20 + +# Pull into this namespace for backwards compatibility. +EmptyResultSet = sql.EmptyResultSet + + +class QuerySet(object): + """ + Represents a lazy database lookup for a set of objects. + """ + def __init__(self, model=None, query=None, using=None): + self.model = model + self._db = using + self.query = query or sql.Query(self.model) + self._result_cache = None + self._sticky_filter = False + self._for_write = False + self._prefetch_related_lookups = [] + self._prefetch_done = False + self._known_related_objects = {} # {rel_field, {pk: rel_obj}} + + ######################## + # PYTHON MAGIC METHODS # + ######################## + + def __deepcopy__(self, memo): + """ + Deep copy of a QuerySet doesn't populate the cache + """ + obj = self.__class__() + for k, v in self.__dict__.items(): + if k == '_result_cache': + obj.__dict__[k] = None + else: + obj.__dict__[k] = copy.deepcopy(v, memo) + return obj + + def __getstate__(self): + """ + Allows the QuerySet to be pickled. + """ + # Force the cache to be fully populated. + self._fetch_all() + obj_dict = self.__dict__.copy() + return obj_dict + + def __repr__(self): + data = list(self[:REPR_OUTPUT_SIZE + 1]) + if len(data) > REPR_OUTPUT_SIZE: + data[-1] = "...(remaining elements truncated)..." + return repr(data) + + def __len__(self): + self._fetch_all() + return len(self._result_cache) + + def __iter__(self): + """ + The queryset iterator protocol uses three nested iterators in the + default case: + 1. sql.compiler:execute_sql() + - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) + using cursor.fetchmany(). 
This part is responsible for
+                 doing some column masking, and returning the rows in chunks.
+            2. sql/compiler.results_iter()
+               - Returns one row at a time. At this point the rows are still
+                 just tuples. In some cases the return values are converted to
+                 Python values at this location (see resolve_columns(),
+                 resolve_aggregate()).
+            3. self.iterator()
+               - Responsible for turning the rows into model objects.
+        """
+        self._fetch_all()
+        return iter(self._result_cache)
+
+    def __nonzero__(self):
+        self._fetch_all()
+        return bool(self._result_cache)
+
+    def __getitem__(self, k):
+        """
+        Retrieves an item or slice from the set of results.
+        """
+        if not isinstance(k, (slice,) + six.integer_types):
+            raise TypeError
+        assert ((not isinstance(k, slice) and (k >= 0))
+                or (isinstance(k, slice) and (k.start is None or k.start >= 0)
+                    and (k.stop is None or k.stop >= 0))), \
+            "Negative indexing is not supported."
+
+        if self._result_cache is not None:
+            return self._result_cache[k]
+
+        if isinstance(k, slice):
+            qs = self._clone()
+            if k.start is not None:
+                start = int(k.start)
+            else:
+                start = None
+            if k.stop is not None:
+                stop = int(k.stop)
+            else:
+                stop = None
+            qs.query.set_limits(start, stop)
+            return list(qs)[::k.step] if k.step else qs
+
+        qs = self._clone()
+        qs.query.set_limits(k, k + 1)
+        return list(qs)[0]
+
+    def __and__(self, other):
+        self._merge_sanity_check(other)
+        if isinstance(other, EmptyQuerySet):
+            return other
+        if isinstance(self, EmptyQuerySet):
+            return self
+        combined = self._clone()
+        combined._merge_known_related_objects(other)
+        combined.query.combine(other.query, sql.AND)
+        return combined
+
+    def __or__(self, other):
+        self._merge_sanity_check(other)
+        if isinstance(self, EmptyQuerySet):
+            return other
+        if isinstance(other, EmptyQuerySet):
+            return self
+        combined = self._clone()
+        combined._merge_known_related_objects(other)
+        combined.query.combine(other.query, sql.OR)
+        return combined
+
+    ####################################
+    # METHODS THAT DO DATABASE QUERIES #
+    ####################################
+
+    def iterator(self):
+        """
+        An iterator over the results from applying this QuerySet to the
+        database.
+        """
+        fill_cache = False
+        if connections[self.db].features.supports_select_related:
+            fill_cache = self.query.select_related
+        if isinstance(fill_cache, dict):
+            requested = fill_cache
+        else:
+            requested = None
+        max_depth = self.query.max_depth
+
+        extra_select = list(self.query.extra_select)
+        aggregate_select = list(self.query.aggregate_select)
+
+        only_load = self.query.get_loaded_field_names()
+        if not fill_cache:
+            fields = self.model._meta.concrete_fields
+
+        load_fields = []
+        # If only/defer clauses have been specified,
+        # build the list of fields that are to be loaded.
+        if only_load:
+            for field, model in self.model._meta.get_concrete_fields_with_model():
+                if model is None:
+                    model = self.model
+                try:
+                    if field.name in only_load[model]:
+                        # Add a field that has been explicitly included
+                        load_fields.append(field.name)
+                except KeyError:
+                    # Model wasn't explicitly listed in the only_load table
+                    # Therefore, we need to load all fields from this model
+                    load_fields.append(field.name)
+
+        index_start = len(extra_select)
+        aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
+
+        skip = None
+        if load_fields and not fill_cache:
+            # Some fields have been deferred, so we have to initialise
+            # via keyword arguments.
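+            # (Illustrative example: qs.only('name') lands here; every other
+            # concrete attname is collected in ``skip`` and the
+            # deferred_class_factory() subclass built below omits them.)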
+ skip = set() + init_list = [] + for field in fields: + if field.name not in load_fields: + skip.add(field.attname) + else: + init_list.append(field.attname) + model_cls = deferred_class_factory(self.model, skip) + + # Cache db and model outside the loop + db = self.db + model = self.model + compiler = self.query.get_compiler(using=db) + if fill_cache: + klass_info = get_klass_info(model, max_depth=max_depth, + requested=requested, only_load=only_load) + for row in compiler.results_iter(): + if fill_cache: + obj, _ = get_cached_row(row, index_start, db, klass_info, + offset=len(aggregate_select)) + else: + # Omit aggregates in object creation. + row_data = row[index_start:aggregate_start] + if skip: + obj = model_cls(**dict(zip(init_list, row_data))) + else: + obj = model(*row_data) + + # Store the source database of the object + obj._state.db = db + # This object came from the database; it's not being added. + obj._state.adding = False + + if extra_select: + for i, k in enumerate(extra_select): + setattr(obj, k, row[i]) + + # Add the aggregates to the model + if aggregate_select: + for i, aggregate in enumerate(aggregate_select): + setattr(obj, aggregate, row[i + aggregate_start]) + + # Add the known related objects to the model, if there are any + if self._known_related_objects: + for field, rel_objs in self._known_related_objects.items(): + pk = getattr(obj, field.get_attname()) + try: + rel_obj = rel_objs[pk] + except KeyError: + pass # may happen in qs1 | qs2 scenarios + else: + setattr(obj, field.name, rel_obj) + + yield obj + + def aggregate(self, *args, **kwargs): + """ + Returns a dictionary containing the calculations (aggregation) + over the current queryset + + If args is present the expression is passed as a kwarg using + the Aggregate object's default alias. + """ + if self.query.distinct_fields: + raise NotImplementedError("aggregate() + distinct(fields) not implemented.") + for arg in args: + kwargs[arg.default_alias] = arg + + query = self.query.clone() + + for (alias, aggregate_expr) in kwargs.items(): + query.add_aggregate(aggregate_expr, self.model, alias, + is_summary=True) + + return query.get_aggregation(using=self.db) + + def count(self): + """ + Performs a SELECT COUNT() and returns the number of records as an + integer. + + If the QuerySet is already fully cached this simply returns the length + of the cached results set to avoid multiple SELECT COUNT(*) calls. + """ + if self._result_cache is not None: + return len(self._result_cache) + + return self.query.get_count(using=self.db) + + def get(self, *args, **kwargs): + """ + Performs the query and returns a single object matching the given + keyword arguments. + """ + clone = self.filter(*args, **kwargs) + if self.query.can_filter(): + clone = clone.order_by() + num = len(clone) + if num == 1: + return clone._result_cache[0] + if not num: + raise self.model.DoesNotExist( + "%s matching query does not exist." % + self.model._meta.object_name) + raise self.model.MultipleObjectsReturned( + "get() returned more than one %s -- it returned %s!" % + (self.model._meta.object_name, num)) + + def create(self, **kwargs): + """ + Creates a new object with the given kwargs, saving it to the database + and returning the created object. + """ + obj = self.model(**kwargs) + self._for_write = True + obj.save(force_insert=True, using=self.db) + return obj + + def bulk_create(self, objs, batch_size=None): + """ + Inserts each of the instances into the database. 
This does *not* call + save() on each of the instances, does not send any pre/post save + signals, and does not set the primary key attribute if it is an + autoincrement field. + """ + # So this case is fun. When you bulk insert you don't get the primary + # keys back (if it's an autoincrement), so you can't insert into the + # child tables which references this. There are two workarounds, 1) + # this could be implemented if you didn't have an autoincrement pk, + # and 2) you could do it by doing O(n) normal inserts into the parent + # tables to get the primary keys back, and then doing a single bulk + # insert into the childmost table. Some databases might allow doing + # this by using RETURNING clause for the insert query. We're punting + # on these for now because they are relatively rare cases. + assert batch_size is None or batch_size > 0 + if self.model._meta.parents: + raise ValueError("Can't bulk create an inherited model") + if not objs: + return objs + self._for_write = True + connection = connections[self.db] + fields = self.model._meta.local_concrete_fields + with transaction.commit_on_success_unless_managed(using=self.db): + if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk + and self.model._meta.has_auto_field): + self._batched_insert(objs, fields, batch_size) + else: + objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) + if objs_with_pk: + self._batched_insert(objs_with_pk, fields, batch_size) + if objs_without_pk: + fields= [f for f in fields if not isinstance(f, AutoField)] + self._batched_insert(objs_without_pk, fields, batch_size) + + return objs + + def get_or_create(self, **kwargs): + """ + Looks up an object with the given kwargs, creating one if necessary. + Returns a tuple of (object, created), where created is a boolean + specifying whether an object was created. + """ + defaults = kwargs.pop('defaults', {}) + lookup = kwargs.copy() + for f in self.model._meta.fields: + if f.attname in lookup: + lookup[f.name] = lookup.pop(f.attname) + try: + self._for_write = True + return self.get(**lookup), False + except self.model.DoesNotExist: + try: + params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k) + params.update(defaults) + obj = self.model(**params) + with transaction.atomic(using=self.db): + obj.save(force_insert=True, using=self.db) + return obj, True + except DatabaseError: + exc_info = sys.exc_info() + try: + return self.get(**lookup), False + except self.model.DoesNotExist: + # Re-raise the DatabaseError with its original traceback. + six.reraise(*exc_info) + + def _earliest_or_latest(self, field_name=None, direction="-"): + """ + Returns the latest object, according to the model's + 'get_latest_by' option or optional given field_name. + """ + order_by = field_name or getattr(self.model._meta, 'get_latest_by') + assert bool(order_by), "earliest() and latest() require either a "\ + "field_name parameter or 'get_latest_by' in the model" + assert self.query.can_filter(), \ + "Cannot change a query once a slice has been taken." 
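+        # (In effect this issues ORDER BY <direction><field> LIMIT 1, so e.g.
+        # latest('pub_date') orders by '-pub_date' and returns that single row;
+        # 'pub_date' is a hypothetical field name.)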
+ obj = self._clone()
+ obj.query.set_limits(high=1)
+ obj.query.clear_ordering(force_empty=True)
+ obj.query.add_ordering('%s%s' % (direction, order_by))
+ return obj.get()
+
+ def earliest(self, field_name=None):
+ return self._earliest_or_latest(field_name=field_name, direction="")
+
+ def latest(self, field_name=None):
+ return self._earliest_or_latest(field_name=field_name, direction="-")
+
+ def first(self):
+ """
+ Returns the first object of a query; returns None if no match is found.
+ """
+ qs = self if self.ordered else self.order_by('pk')
+ try:
+ return qs[0]
+ except IndexError:
+ return None
+
+ def last(self):
+ """
+ Returns the last object of a query; returns None if no match is found.
+ """
+ qs = self.reverse() if self.ordered else self.order_by('-pk')
+ try:
+ return qs[0]
+ except IndexError:
+ return None
+
+ def in_bulk(self, id_list):
+ """
+ Returns a dictionary mapping each of the given IDs to the object with
+ that ID.
+ """
+ assert self.query.can_filter(), \
+ "Cannot use 'limit' or 'offset' with in_bulk"
+ if not id_list:
+ return {}
+ qs = self.filter(pk__in=id_list).order_by()
+ return dict([(obj._get_pk_val(), obj) for obj in qs])
+
+ def delete(self):
+ """
+ Deletes the records in the current QuerySet.
+ """
+ assert self.query.can_filter(), \
+ "Cannot use 'limit' or 'offset' with delete."
+
+ del_query = self._clone()
+
+ # The delete is actually 2 queries - one to find related objects,
+ # and one to delete. Make sure that the discovery of related
+ # objects is performed on the same database as the deletion.
+ del_query._for_write = True
+
+ # Disable non-supported fields.
+ del_query.query.select_for_update = False
+ del_query.query.select_related = False
+ del_query.query.clear_ordering(force_empty=True)
+
+ collector = Collector(using=del_query.db)
+ collector.collect(del_query)
+ collector.delete()
+
+ # Clear the result cache, in case this QuerySet gets reused.
+ self._result_cache = None
+ delete.alters_data = True
+
+ def _raw_delete(self, using):
+ """
+ Deletes the objects found from the given queryset in a single direct
+ SQL query. No signals are sent, and there is no protection for
+ cascades.
+ """
+ sql.DeleteQuery(self.model).delete_qs(self, using)
+ _raw_delete.alters_data = True
+
+ def update(self, **kwargs):
+ """
+ Updates all elements in the current QuerySet, setting all the given
+ fields to the appropriate values.
+ """
+ assert self.query.can_filter(), \
+ "Cannot update a query once a slice has been taken."
+ self._for_write = True
+ query = self.query.clone(sql.UpdateQuery)
+ query.add_update_values(kwargs)
+ with transaction.commit_on_success_unless_managed(using=self.db):
+ rows = query.get_compiler(self.db).execute_sql(None)
+ self._result_cache = None
+ return rows
+ update.alters_data = True
+
+ def _update(self, values):
+ """
+ A version of update that accepts field objects instead of field names.
+ Used primarily for model saving and not intended for use by general
+ code (it requires too much poking around at model internals to be
+ useful at that level).
+ """
+ assert self.query.can_filter(), \
+ "Cannot update a query once a slice has been taken."
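+ # Re-clone the query as an UpdateQuery and execute it directly; the
+ # result cache is invalidated because the rows may have changed.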
+ query = self.query.clone(sql.UpdateQuery) + query.add_update_fields(values) + self._result_cache = None + return query.get_compiler(self.db).execute_sql(None) + _update.alters_data = True + + def exists(self): + if self._result_cache is None: + return self.query.has_results(using=self.db) + return bool(self._result_cache) + + def _prefetch_related_objects(self): + # This method can only be called once the result cache has been filled. + prefetch_related_objects(self._result_cache, self._prefetch_related_lookups) + self._prefetch_done = True + + ################################################## + # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # + ################################################## + + def values(self, *fields): + return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields) + + def values_list(self, *fields, **kwargs): + flat = kwargs.pop('flat', False) + if kwargs: + raise TypeError('Unexpected keyword arguments to values_list: %s' + % (list(kwargs),)) + if flat and len(fields) > 1: + raise TypeError("'flat' is not valid when values_list is called with more than one field.") + return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat, + _fields=fields) + + def dates(self, field_name, kind, order='ASC'): + """ + Returns a list of date objects representing all available dates for + the given field_name, scoped to 'kind'. + """ + assert kind in ("year", "month", "day"), \ + "'kind' must be one of 'year', 'month' or 'day'." + assert order in ('ASC', 'DESC'), \ + "'order' must be either 'ASC' or 'DESC'." + return self._clone(klass=DateQuerySet, setup=True, + _field_name=field_name, _kind=kind, _order=order) + + def datetimes(self, field_name, kind, order='ASC', tzinfo=None): + """ + Returns a list of datetime objects representing all available + datetimes for the given field_name, scoped to 'kind'. + """ + assert kind in ("year", "month", "day", "hour", "minute", "second"), \ + "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'." + assert order in ('ASC', 'DESC'), \ + "'order' must be either 'ASC' or 'DESC'." + if settings.USE_TZ: + if tzinfo is None: + tzinfo = timezone.get_current_timezone() + else: + tzinfo = None + return self._clone(klass=DateTimeQuerySet, setup=True, + _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo) + + def none(self): + """ + Returns an empty QuerySet. + """ + clone = self._clone() + clone.query.set_empty() + return clone + + ################################################################## + # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # + ################################################################## + + def all(self): + """ + Returns a new QuerySet that is a copy of the current one. This allows a + QuerySet to proxy for a model manager in some cases. + """ + return self._clone() + + def filter(self, *args, **kwargs): + """ + Returns a new QuerySet instance with the args ANDed to the existing + set. + """ + return self._filter_or_exclude(False, *args, **kwargs) + + def exclude(self, *args, **kwargs): + """ + Returns a new QuerySet instance with NOT (args) ANDed to the existing + set. + """ + return self._filter_or_exclude(True, *args, **kwargs) + + def _filter_or_exclude(self, negate, *args, **kwargs): + if args or kwargs: + assert self.query.can_filter(), \ + "Cannot filter a query once a slice has been taken." 
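+ # Positional Q objects and keyword lookups are collapsed into a single
+ # Q node; negate wraps that node in NOT() when the WHERE tree is built.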
+
+ clone = self._clone()
+ if negate:
+ clone.query.add_q(~Q(*args, **kwargs))
+ else:
+ clone.query.add_q(Q(*args, **kwargs))
+ return clone
+
+ def complex_filter(self, filter_obj):
+ """
+ Returns a new QuerySet instance with filter_obj added to the filters.
+
+ filter_obj can be a Q object (or anything with an add_to_query()
+ method) or a dictionary of keyword lookup arguments.
+
+ This exists to support framework features such as 'limit_choices_to',
+ and usually it will be more natural to use other methods.
+ """
+ if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
+ clone = self._clone()
+ clone.query.add_q(filter_obj)
+ return clone
+ else:
+ return self._filter_or_exclude(None, **filter_obj)
+
+ def select_for_update(self, **kwargs):
+ """
+ Returns a new QuerySet instance that will select objects with a
+ FOR UPDATE lock.
+ """
+ # Default to False for nowait
+ nowait = kwargs.pop('nowait', False)
+ obj = self._clone()
+ obj._for_write = True
+ obj.query.select_for_update = True
+ obj.query.select_for_update_nowait = nowait
+ return obj
+
+ def select_related(self, *fields, **kwargs):
+ """
+ Returns a new QuerySet instance that will select related objects.
+
+ If fields are specified, they must be ForeignKey fields and only those
+ related objects are included in the selection.
+
+ If select_related(None) is called, the list is cleared.
+ """
+ if 'depth' in kwargs:
+ warnings.warn('The "depth" keyword argument has been deprecated.\n'
+ 'Use related field names instead.', DeprecationWarning, stacklevel=2)
+ depth = kwargs.pop('depth', 0)
+ if kwargs:
+ raise TypeError('Unexpected keyword arguments to select_related: %s'
+ % (list(kwargs),))
+ obj = self._clone()
+ if fields == (None,):
+ obj.query.select_related = False
+ elif fields:
+ if depth:
+ raise TypeError('Cannot pass both "depth" and fields to select_related()')
+ obj.query.add_select_related(fields)
+ else:
+ obj.query.select_related = True
+ if depth:
+ obj.query.max_depth = depth
+ return obj
+
+ def prefetch_related(self, *lookups):
+ """
+ Returns a new QuerySet instance that will prefetch the specified
+ Many-To-One and Many-To-Many related objects when the QuerySet is
+ evaluated.
+
+ When prefetch_related() is called more than once, the list of lookups
+ to prefetch is appended to. If prefetch_related(None) is called, the
+ list is cleared.
+ """
+ clone = self._clone()
+ if lookups == (None,):
+ clone._prefetch_related_lookups = []
+ else:
+ clone._prefetch_related_lookups.extend(lookups)
+ return clone
+
+ def annotate(self, *args, **kwargs):
+ """
+ Returns a new QuerySet in which the returned objects have been
+ annotated with data aggregated from related fields.
+ """
+ for arg in args:
+ if arg.default_alias in kwargs:
+ raise ValueError("The named annotation '%s' conflicts with the "
+ "default name for another annotation."
+ % arg.default_alias)
+ kwargs[arg.default_alias] = arg
+
+ names = getattr(self, '_fields', None)
+ if names is None:
+ names = set(self.model._meta.get_all_field_names())
+ for aggregate in kwargs:
+ if aggregate in names:
+ raise ValueError("The annotation '%s' conflicts with a field on "
+ "the model."
% aggregate)
+
+ obj = self._clone()
+
+ obj._setup_aggregate_query(list(kwargs))
+
+ # Add the aggregates to the query
+ for (alias, aggregate_expr) in kwargs.items():
+ obj.query.add_aggregate(aggregate_expr, self.model, alias,
+ is_summary=False)
+
+ return obj
+
+ def order_by(self, *field_names):
+ """
+ Returns a new QuerySet instance with the ordering changed.
+ """
+ assert self.query.can_filter(), \
+ "Cannot reorder a query once a slice has been taken."
+ obj = self._clone()
+ obj.query.clear_ordering(force_empty=False)
+ obj.query.add_ordering(*field_names)
+ return obj
+
+ def distinct(self, *field_names):
+ """
+ Returns a new QuerySet instance that will select only distinct results.
+ """
+ assert self.query.can_filter(), \
+ "Cannot create distinct fields once a slice has been taken."
+ obj = self._clone()
+ obj.query.add_distinct_fields(*field_names)
+ return obj
+
+ def extra(self, select=None, where=None, params=None, tables=None,
+ order_by=None, select_params=None):
+ """
+ Adds extra SQL fragments to the query.
+ """
+ assert self.query.can_filter(), \
+ "Cannot change a query once a slice has been taken."
+ clone = self._clone()
+ clone.query.add_extra(select, select_params, where, params, tables, order_by)
+ return clone
+
+ def reverse(self):
+ """
+ Reverses the ordering of the QuerySet.
+ """
+ clone = self._clone()
+ clone.query.standard_ordering = not clone.query.standard_ordering
+ return clone
+
+ def defer(self, *fields):
+ """
+ Defers the loading of data for certain fields until they are accessed.
+ The set of fields to defer is added to any existing set of deferred
+ fields. The only exception to this is if None is passed in as the only
+ parameter, in which case all deferrals are removed (None acts as a
+ reset option).
+ """
+ clone = self._clone()
+ if fields == (None,):
+ clone.query.clear_deferred_loading()
+ else:
+ clone.query.add_deferred_loading(fields)
+ return clone
+
+ def only(self, *fields):
+ """
+ Essentially, the opposite of defer. Only the fields passed into this
+ method and that are not already specified as deferred are loaded
+ immediately when the queryset is evaluated.
+ """
+ if fields == (None,):
+ # Can only pass None to defer(), not only(), as the reset option.
+ # That won't stop people trying to do this, so let's be explicit.
+ raise TypeError("Cannot pass None as an argument to only().")
+ clone = self._clone()
+ clone.query.add_immediate_loading(fields)
+ return clone
+
+ def using(self, alias):
+ """
+ Selects which database this QuerySet should execute its query against.
+ """
+ clone = self._clone()
+ clone._db = alias
+ return clone
+
+ ###################################
+ # PUBLIC INTROSPECTION ATTRIBUTES #
+ ###################################
+
+ def ordered(self):
+ """
+ Returns True if the QuerySet is ordered -- i.e. has an order_by()
+ clause or a default ordering on the model.
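+ For example, qs.order_by('name').ordered is True, while a queryset on
+ a model without Meta.ordering is unordered by default.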
+ """ + if self.query.extra_order_by or self.query.order_by: + return True + elif self.query.default_ordering and self.query.get_meta().ordering: + return True + else: + return False + ordered = property(ordered) + + @property + def db(self): + "Return the database that will be used if this query is executed now" + if self._for_write: + return self._db or router.db_for_write(self.model) + return self._db or router.db_for_read(self.model) + + ################### + # PRIVATE METHODS # + ################### + def _batched_insert(self, objs, fields, batch_size): + """ + A little helper method for bulk_insert to insert the bulk one batch + at a time. Inserts recursively a batch from the front of the bulk and + then _batched_insert() the remaining objects again. + """ + if not objs: + return + ops = connections[self.db].ops + batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1)) + for batch in [objs[i:i+batch_size] + for i in range(0, len(objs), batch_size)]: + self.model._base_manager._insert(batch, fields=fields, + using=self.db) + + def _clone(self, klass=None, setup=False, **kwargs): + if klass is None: + klass = self.__class__ + query = self.query.clone() + if self._sticky_filter: + query.filter_is_sticky = True + c = klass(model=self.model, query=query, using=self._db) + c._for_write = self._for_write + c._prefetch_related_lookups = self._prefetch_related_lookups[:] + c._known_related_objects = self._known_related_objects + c.__dict__.update(kwargs) + if setup and hasattr(c, '_setup_query'): + c._setup_query() + return c + + def _fetch_all(self): + if self._result_cache is None: + self._result_cache = list(self.iterator()) + if self._prefetch_related_lookups and not self._prefetch_done: + self._prefetch_related_objects() + + def _next_is_sticky(self): + """ + Indicates that the next filter call and the one following that should + be treated as a single filter. This is only important when it comes to + determining when to reuse tables for many-to-many filters. Required so + that we can filter naturally on the results of related managers. + + This doesn't return a clone of the current QuerySet (it returns + "self"). The method is only used internally and should be immediately + followed by a filter() that does create a clone. + """ + self._sticky_filter = True + return self + + def _merge_sanity_check(self, other): + """ + Checks that we are merging two comparable QuerySet classes. By default + this does nothing, but see the ValuesQuerySet for an example of where + it's useful. + """ + pass + + def _merge_known_related_objects(self, other): + """ + Keep track of all known related objects from either QuerySet instance. + """ + for field, objects in other._known_related_objects.items(): + self._known_related_objects.setdefault(field, {}).update(objects) + + def _setup_aggregate_query(self, aggregates): + """ + Prepare the query for computing a result that contains aggregate annotations. + """ + opts = self.model._meta + if self.query.group_by is None: + field_names = [f.attname for f in opts.concrete_fields] + self.query.add_fields(field_names, False) + self.query.set_group_by() + + def _prepare(self): + return self + + def _as_sql(self, connection): + """ + Returns the internal query's SQL and parameters (as a tuple). 
+ """ + obj = self.values("pk") + if obj._db is None or connection == connections[obj._db]: + return obj.query.get_compiler(connection=connection).as_nested_sql() + raise ValueError("Can't do subqueries with queries on different DBs.") + + # When used as part of a nested query, a queryset will never be an "always + # empty" result. + value_annotation = True + +class InstanceCheckMeta(type): + def __instancecheck__(self, instance): + return instance.query.is_empty() + +class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)): + """ + Marker class usable for checking if a queryset is empty by .none(): + isinstance(qs.none(), EmptyQuerySet) -> True + """ + + def __init__(self, *args, **kwargs): + raise TypeError("EmptyQuerySet can't be instantiated") + +class ValuesQuerySet(QuerySet): + def __init__(self, *args, **kwargs): + super(ValuesQuerySet, self).__init__(*args, **kwargs) + # select_related isn't supported in values(). (FIXME -#3358) + self.query.select_related = False + + # QuerySet.clone() will also set up the _fields attribute with the + # names of the model fields to select. + + def iterator(self): + # Purge any extra columns that haven't been explicitly asked for + extra_names = list(self.query.extra_select) + field_names = self.field_names + aggregate_names = list(self.query.aggregate_select) + + names = extra_names + field_names + aggregate_names + + for row in self.query.get_compiler(self.db).results_iter(): + yield dict(zip(names, row)) + + def delete(self): + # values().delete() doesn't work currently - make sure it raises an + # user friendly error. + raise TypeError("Queries with .values() or .values_list() applied " + "can't be deleted") + + def _setup_query(self): + """ + Constructs the field_names list that the values query will be + retrieving. + + Called by the _clone() method after initializing the rest of the + instance. + """ + self.query.clear_deferred_loading() + self.query.clear_select_fields() + + if self._fields: + self.extra_names = [] + self.aggregate_names = [] + if not self.query.extra and not self.query.aggregates: + # Short cut - if there are no extra or aggregates, then + # the values() clause must be just field names. + self.field_names = list(self._fields) + else: + self.query.default_cols = False + self.field_names = [] + for f in self._fields: + # we inspect the full extra_select list since we might + # be adding back an extra select item that we hadn't + # had selected previously. + if f in self.query.extra: + self.extra_names.append(f) + elif f in self.query.aggregate_select: + self.aggregate_names.append(f) + else: + self.field_names.append(f) + else: + # Default to all fields. + self.extra_names = None + self.field_names = [f.attname for f in self.model._meta.concrete_fields] + self.aggregate_names = None + + self.query.select = [] + if self.extra_names is not None: + self.query.set_extra_mask(self.extra_names) + self.query.add_fields(self.field_names, True) + if self.aggregate_names is not None: + self.query.set_aggregate_mask(self.aggregate_names) + + def _clone(self, klass=None, setup=False, **kwargs): + """ + Cloning a ValuesQuerySet preserves the current fields. + """ + c = super(ValuesQuerySet, self)._clone(klass, **kwargs) + if not hasattr(c, '_fields'): + # Only clone self._fields if _fields wasn't passed into the cloning + # call directly. 
+ c._fields = self._fields[:] + c.field_names = self.field_names + c.extra_names = self.extra_names + c.aggregate_names = self.aggregate_names + if setup and hasattr(c, '_setup_query'): + c._setup_query() + return c + + def _merge_sanity_check(self, other): + super(ValuesQuerySet, self)._merge_sanity_check(other) + if (set(self.extra_names) != set(other.extra_names) or + set(self.field_names) != set(other.field_names) or + self.aggregate_names != other.aggregate_names): + raise TypeError("Merging '%s' classes must involve the same values in each case." + % self.__class__.__name__) + + def _setup_aggregate_query(self, aggregates): + """ + Prepare the query for computing a result that contains aggregate annotations. + """ + self.query.set_group_by() + + if self.aggregate_names is not None: + self.aggregate_names.extend(aggregates) + self.query.set_aggregate_mask(self.aggregate_names) + + super(ValuesQuerySet, self)._setup_aggregate_query(aggregates) + + def _as_sql(self, connection): + """ + For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can + only be used as nested queries if they're already set up to select only + a single field (in which case, that is the field column that is + returned). This differs from QuerySet.as_sql(), where the column to + select is set up by Django. + """ + if ((self._fields and len(self._fields) > 1) or + (not self._fields and len(self.model._meta.fields) > 1)): + raise TypeError('Cannot use a multi-field %s as a filter value.' + % self.__class__.__name__) + + obj = self._clone() + if obj._db is None or connection == connections[obj._db]: + return obj.query.get_compiler(connection=connection).as_nested_sql() + raise ValueError("Can't do subqueries with queries on different DBs.") + + def _prepare(self): + """ + Validates that we aren't trying to do a query like + value__in=qs.values('value1', 'value2'), which isn't valid. + """ + if ((self._fields and len(self._fields) > 1) or + (not self._fields and len(self.model._meta.fields) > 1)): + raise TypeError('Cannot use a multi-field %s as a filter value.' + % self.__class__.__name__) + return self + + +class ValuesListQuerySet(ValuesQuerySet): + def iterator(self): + if self.flat and len(self._fields) == 1: + for row in self.query.get_compiler(self.db).results_iter(): + yield row[0] + elif not self.query.extra_select and not self.query.aggregate_select: + for row in self.query.get_compiler(self.db).results_iter(): + yield tuple(row) + else: + # When extra(select=...) or an annotation is involved, the extra + # cols are always at the start of the row, and we need to reorder + # the fields to match the order in self._fields. + extra_names = list(self.query.extra_select) + field_names = self.field_names + aggregate_names = list(self.query.aggregate_select) + + names = extra_names + field_names + aggregate_names + + # If a field list has been specified, use it. Otherwise, use the + # full list of fields, including extras and aggregates. 
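+ # (e.g. an annotation added after values_list() is appended after
+ # the requested fields even though it was not named explicitly.)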
+ if self._fields:
+ fields = list(self._fields) + [f for f in aggregate_names if f not in self._fields]
+ else:
+ fields = names
+
+ for row in self.query.get_compiler(self.db).results_iter():
+ data = dict(zip(names, row))
+ yield tuple([data[f] for f in fields])
+
+ def _clone(self, *args, **kwargs):
+ clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
+ if not hasattr(clone, "flat"):
+ # Only assign flat if the clone didn't already get it from kwargs
+ clone.flat = self.flat
+ return clone
+
+
+class DateQuerySet(QuerySet):
+ def iterator(self):
+ return self.query.get_compiler(self.db).results_iter()
+
+ def _setup_query(self):
+ """
+ Sets up any special features of the query attribute.
+
+ Called by the _clone() method after initializing the rest of the
+ instance.
+ """
+ self.query.clear_deferred_loading()
+ self.query = self.query.clone(klass=sql.DateQuery, setup=True)
+ self.query.select = []
+ self.query.add_select(self._field_name, self._kind, self._order)
+
+ def _clone(self, klass=None, setup=False, **kwargs):
+ c = super(DateQuerySet, self)._clone(klass, False, **kwargs)
+ c._field_name = self._field_name
+ c._kind = self._kind
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+
+class DateTimeQuerySet(QuerySet):
+ def iterator(self):
+ return self.query.get_compiler(self.db).results_iter()
+
+ def _setup_query(self):
+ """
+ Sets up any special features of the query attribute.
+
+ Called by the _clone() method after initializing the rest of the
+ instance.
+ """
+ self.query.clear_deferred_loading()
+ self.query = self.query.clone(klass=sql.DateTimeQuery, setup=True, tzinfo=self._tzinfo)
+ self.query.select = []
+ self.query.add_select(self._field_name, self._kind, self._order)
+
+ def _clone(self, klass=None, setup=False, **kwargs):
+ c = super(DateTimeQuerySet, self)._clone(klass, False, **kwargs)
+ c._field_name = self._field_name
+ c._kind = self._kind
+ c._tzinfo = self._tzinfo
+ if setup and hasattr(c, '_setup_query'):
+ c._setup_query()
+ return c
+
+
+def get_klass_info(klass, max_depth=0, cur_depth=0, requested=None,
+ only_load=None, from_parent=None):
+ """
+ Helper function that recursively returns information for a klass, to be
+ used in get_cached_row. It exists just to compute this information only
+ once for the entire queryset. Otherwise it would be computed for each
+ row, which leads to poor performance on large querysets.
+
+ Arguments:
+ * klass - the class to retrieve (and instantiate)
+ * max_depth - the maximum depth to which a select_related()
+ relationship should be explored.
+ * cur_depth - the current depth in the select_related() tree.
+ Used in recursive calls to determine if we should dig deeper.
+ * requested - A dictionary describing the select_related() tree
+ that is to be retrieved. Keys are field names; values are
+ dictionaries describing the keys on that related object that
+ are themselves to be select_related().
+ * only_load - if the query has had only() or defer() applied,
+ this is the list of field names that will be returned. If None,
+ the full field list for `klass` can be assumed.
+ * from_parent - the parent model used to get to this model
+
+ Note that when travelling from parent to child, we will only load child
+ fields which aren't in the parent.
+ """
+ if max_depth and requested is None and cur_depth > max_depth:
+ # We've recursed deeply enough; stop now.
+ return None
+
+ if only_load:
+ load_fields = only_load.get(klass) or set()
+ # When we create the object, we will also be populating all the
+ # parent classes, so traverse the parent classes looking
+ # for fields that must be included on load.
+ for parent in klass._meta.get_parent_list():
+ fields = only_load.get(parent)
+ if fields:
+ load_fields.update(fields)
+ else:
+ load_fields = None
+
+ if load_fields:
+ # Handle deferred fields.
+ skip = set()
+ init_list = []
+ # Build the list of fields that *haven't* been requested
+ for field, model in klass._meta.get_concrete_fields_with_model():
+ if field.name not in load_fields:
+ skip.add(field.attname)
+ elif from_parent and issubclass(from_parent, model.__class__):
+ # Avoid loading fields already loaded for parent model for
+ # child models.
+ continue
+ else:
+ init_list.append(field.attname)
+ # Retrieve all the requested fields
+ field_count = len(init_list)
+ if skip:
+ klass = deferred_class_factory(klass, skip)
+ field_names = init_list
+ else:
+ field_names = ()
+ else:
+ # Load all fields on klass
+
+ field_count = len(klass._meta.concrete_fields)
+ # Check if we need to skip some parent fields.
+ if from_parent and len(klass._meta.local_concrete_fields) != len(klass._meta.concrete_fields):
+ # Only load those fields which haven't already been loaded into
+ # 'from_parent'.
+ non_seen_models = [p for p in klass._meta.get_parent_list()
+ if not issubclass(from_parent, p)]
+ # Load local fields, too...
+ non_seen_models.append(klass)
+ field_names = [f.attname for f in klass._meta.concrete_fields
+ if f.model in non_seen_models]
+ field_count = len(field_names)
+ # Try to avoid populating the field_names variable for performance
+ # reasons. If the field_names variable is set, we use **kwargs based
+ # model init which is slower than normal init.
+ if field_count == len(klass._meta.concrete_fields):
+ field_names = ()
+
+ restricted = requested is not None
+
+ related_fields = []
+ for f in klass._meta.fields:
+ if select_related_descend(f, restricted, requested, load_fields):
+ if restricted:
+ next = requested[f.name]
+ else:
+ next = None
+ klass_info = get_klass_info(f.rel.to, max_depth=max_depth, cur_depth=cur_depth+1,
+ requested=next, only_load=only_load)
+ related_fields.append((f, klass_info))
+
+ reverse_related_fields = []
+ if restricted:
+ for o in klass._meta.get_all_related_objects():
+ if o.field.unique and select_related_descend(o.field, restricted, requested,
+ only_load.get(o.model), reverse=True):
+ next = requested[o.field.related_query_name()]
+ parent = klass if issubclass(o.model, klass) else None
+ klass_info = get_klass_info(o.model, max_depth=max_depth, cur_depth=cur_depth+1,
+ requested=next, only_load=only_load, from_parent=parent)
+ reverse_related_fields.append((o.field, klass_info))
+ if field_names:
+ pk_idx = field_names.index(klass._meta.pk.attname)
+ else:
+ pk_idx = klass._meta.pk_index()
+
+ return klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx
+
+
+def get_cached_row(row, index_start, using, klass_info, offset=0,
+ parent_data=()):
+ """
+ Helper function that recursively returns an object with the specified
+ related attributes already populated.
+
+ This method may be called recursively to populate deep select_related()
+ clauses.
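+ For example, a select_related('author') row carries the author's
+ columns after the primary object's own columns; this function peels
+ them off and attaches the constructed related instance.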
+ + Arguments: + * row - the row of data returned by the database cursor + * index_start - the index of the row at which data for this + object is known to start + * offset - the number of additional fields that are known to + exist in row for `klass`. This usually means the number of + annotated results on `klass`. + * using - the database alias on which the query is being executed. + * klass_info - result of the get_klass_info function + * parent_data - parent model data in format (field, value). Used + to populate the non-local fields of child models. + """ + if klass_info is None: + return None + klass, field_names, field_count, related_fields, reverse_related_fields, pk_idx = klass_info + + + fields = row[index_start : index_start + field_count] + # If the pk column is None (or the Oracle equivalent ''), then the related + # object must be non-existent - set the relation to None. + if fields[pk_idx] == None or fields[pk_idx] == '': + obj = None + elif field_names: + fields = list(fields) + for rel_field, value in parent_data: + field_names.append(rel_field.attname) + fields.append(value) + obj = klass(**dict(zip(field_names, fields))) + else: + obj = klass(*fields) + # If an object was retrieved, set the database state. + if obj: + obj._state.db = using + obj._state.adding = False + + # Instantiate related fields + index_end = index_start + field_count + offset + # Iterate over each related object, populating any + # select_related() fields + for f, klass_info in related_fields: + # Recursively retrieve the data for the related object + cached_row = get_cached_row(row, index_end, using, klass_info) + # If the recursive descent found an object, populate the + # descriptor caches relevant to the object + if cached_row: + rel_obj, index_end = cached_row + if obj is not None: + # If the base object exists, populate the + # descriptor cache + setattr(obj, f.get_cache_name(), rel_obj) + if f.unique and rel_obj is not None: + # If the field is unique, populate the + # reverse descriptor cache on the related object + setattr(rel_obj, f.related.get_cache_name(), obj) + + # Now do the same, but for reverse related objects. + # Only handle the restricted case - i.e., don't do a depth + # descent into reverse relations unless explicitly requested + for f, klass_info in reverse_related_fields: + # Transfer data from this object to childs. + parent_data = [] + for rel_field, rel_model in klass_info[0]._meta.get_fields_with_model(): + if rel_model is not None and isinstance(obj, rel_model): + parent_data.append((rel_field, getattr(obj, rel_field.attname))) + # Recursively retrieve the data for the related object + cached_row = get_cached_row(row, index_end, using, klass_info, + parent_data=parent_data) + # If the recursive descent found an object, populate the + # descriptor caches relevant to the object + if cached_row: + rel_obj, index_end = cached_row + if obj is not None: + # populate the reverse descriptor cache + setattr(obj, f.related.get_cache_name(), rel_obj) + if rel_obj is not None: + # If the related object exists, populate + # the descriptor cache. + setattr(rel_obj, f.get_cache_name(), obj) + # Populate related object caches using parent data. 
+ for rel_field, _ in parent_data:
+ if rel_field.rel:
+ setattr(rel_obj, rel_field.attname, getattr(obj, rel_field.attname))
+ try:
+ cached_obj = getattr(obj, rel_field.get_cache_name())
+ setattr(rel_obj, rel_field.get_cache_name(), cached_obj)
+ except AttributeError:
+ # Related object hasn't been cached yet
+ pass
+ return obj, index_end
+
+
+class RawQuerySet(object):
+ """
+ Provides an iterator which converts the results of raw SQL queries into
+ annotated model instances.
+ """
+ def __init__(self, raw_query, model=None, query=None, params=None,
+ translations=None, using=None):
+ self.raw_query = raw_query
+ self.model = model
+ self._db = using
+ self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
+ self.params = params or ()
+ self.translations = translations or {}
+
+ def __iter__(self):
+ # Mapping of attnames to row column positions. Used for constructing
+ # the model using kwargs, needed when not all of the model's fields
+ # are present in the query.
+ model_init_field_names = {}
+ # A list of tuples of (column name, column position). Used for
+ # annotation fields.
+ annotation_fields = []
+
+ # Cache some things for performance reasons outside the loop.
+ db = self.db
+ compiler = connections[db].ops.compiler('SQLCompiler')(
+ self.query, connections[db], db
+ )
+ need_resolv_columns = hasattr(compiler, 'resolve_columns')
+
+ query = iter(self.query)
+
+ # Find out which columns are model's fields, and which ones should be
+ # annotated to the model.
+ for pos, column in enumerate(self.columns):
+ if column in self.model_fields:
+ model_init_field_names[self.model_fields[column].attname] = pos
+ else:
+ annotation_fields.append((column, pos))
+
+ # Find out which of the model's fields are not present in the query.
+ skip = set()
+ for field in self.model._meta.fields:
+ if field.attname not in model_init_field_names:
+ skip.add(field.attname)
+ if skip:
+ if self.model._meta.pk.attname in skip:
+ raise InvalidQuery('Raw query must include the primary key')
+ model_cls = deferred_class_factory(self.model, skip)
+ else:
+ model_cls = self.model
+ # All of the model's fields are present in the query. So it is
+ # possible to use *args based model instantiation. For each field
+ # of the model, record the query column position matching that field.
+ model_init_field_pos = []
+ for field in self.model._meta.fields:
+ model_init_field_pos.append(model_init_field_names[field.attname])
+ if need_resolv_columns:
+ fields = [self.model_fields.get(c, None) for c in self.columns]
+ # Begin looping through the query values.
+ for values in query:
+ if need_resolv_columns:
+ values = compiler.resolve_columns(values, fields)
+ # Associate fields to values
+ if skip:
+ model_init_kwargs = {}
+ for attname, pos in six.iteritems(model_init_field_names):
+ model_init_kwargs[attname] = values[pos]
+ instance = model_cls(**model_init_kwargs)
+ else:
+ model_init_args = [values[pos] for pos in model_init_field_pos]
+ instance = model_cls(*model_init_args)
+ if annotation_fields:
+ for column, pos in annotation_fields:
+ setattr(instance, column, values[pos])
+
+ instance._state.db = db
+ instance._state.adding = False
+
+ yield instance
+
+ def __repr__(self):
+ text = self.raw_query
+ if self.params:
+ text = text % (self.params if hasattr(self.params, 'keys') else tuple(self.params))
+ return "<RawQuerySet: %r>" % text
+
+ def __getitem__(self, k):
+ return list(self)[k]
+
+ @property
+ def db(self):
+ "Return the database that will be used if this query is executed now"
+ return self._db or router.db_for_read(self.model)
+
+ def using(self, alias):
+ """
+ Selects which database this RawQuerySet should execute its query
+ against.
+ """
+ return RawQuerySet(self.raw_query, model=self.model,
+ query=self.query.clone(using=alias),
+ params=self.params, translations=self.translations,
+ using=alias)
+
+ @property
+ def columns(self):
+ """
+ A list of model field names in the order they'll appear in the
+ query results.
+ """
+ if not hasattr(self, '_columns'):
+ self._columns = self.query.get_columns()
+
+ # Adjust any column names which don't match field names
+ for (query_name, model_name) in self.translations.items():
+ try:
+ index = self._columns.index(query_name)
+ self._columns[index] = model_name
+ except ValueError:
+ # Ignore translations for non-existent column names
+ pass
+
+ return self._columns
+
+ @property
+ def model_fields(self):
+ """
+ A dict mapping column names to model field names.
+ """
+ if not hasattr(self, '_model_fields'):
+ converter = connections[self.db].introspection.table_name_converter
+ self._model_fields = {}
+ for field in self.model._meta.fields:
+ name, column = field.get_attname_column()
+ self._model_fields[converter(column)] = field
+ return self._model_fields
+
+
+def insert_query(model, objs, fields, return_id=False, raw=False, using=None):
+ """
+ Inserts a new record for the given model. This provides an interface to
+ the InsertQuery class and is how Model.save() is implemented. It is not
+ part of the public API.
+ """
+ query = sql.InsertQuery(model)
+ query.insert_values(fields, objs, raw=raw)
+ return query.get_compiler(using=using).execute_sql(return_id)
+
+
+def prefetch_related_objects(result_cache, related_lookups):
+ """
+ Helper function for prefetch_related functionality.
+
+ Populates the prefetched object caches for a list of results
+ from a QuerySet.
+ """
+ if len(result_cache) == 0:
+ return # nothing to do
+
+ model = result_cache[0].__class__
+
+ # We need to be able to dynamically add to the list of prefetch_related
+ # lookups that we look up (see below). So we need some bookkeeping to
+ # ensure we don't do duplicate work.
+ done_lookups = set() # list of lookups like foo__bar__baz
+ done_queries = {} # dictionary of things like 'foo__bar': [results]
+
+ auto_lookups = [] # we add to this as we go through.
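+ # Lookups are walked level by level: "foo__bar" first prefetches "foo",
+ # then "foo__bar"; done_queries lets lookups with shared prefixes reuse
+ # the objects already fetched for those prefixes.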
+ followed_descriptors = set() # recursion protection
+
+ all_lookups = itertools.chain(related_lookups, auto_lookups)
+ for lookup in all_lookups:
+ if lookup in done_lookups:
+ # We've done exactly this already, skip the whole thing
+ continue
+ done_lookups.add(lookup)
+
+ # Top level, the list of objects to decorate is the result cache
+ # from the primary QuerySet. It won't be for deeper levels.
+ obj_list = result_cache
+
+ attrs = lookup.split(LOOKUP_SEP)
+ for level, attr in enumerate(attrs):
+ # Prepare main instances
+ if len(obj_list) == 0:
+ break
+
+ current_lookup = LOOKUP_SEP.join(attrs[0:level+1])
+ if current_lookup in done_queries:
+ # Skip any prefetching, and any object preparation
+ obj_list = done_queries[current_lookup]
+ continue
+
+ # Prepare objects:
+ good_objects = True
+ for obj in obj_list:
+ # Since prefetching can re-use instances, it is possible to have
+ # the same instance multiple times in obj_list, so obj might
+ # already be prepared.
+ if not hasattr(obj, '_prefetched_objects_cache'):
+ try:
+ obj._prefetched_objects_cache = {}
+ except AttributeError:
+ # Must be in a QuerySet subclass that is not returning
+ # Model instances, either in Django or a 3rd
+ # party. prefetch_related() doesn't make sense, so quit
+ # now.
+ good_objects = False
+ break
+ if not good_objects:
+ break
+
+ # Descend down the tree
+
+ # We assume that objects retrieved are homogeneous (which is the premise
+ # of prefetch_related), so what applies to the first object applies to all.
+ first_obj = obj_list[0]
+ prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, attr)
+
+ if not attr_found:
+ raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
+ "parameter to prefetch_related()" %
+ (attr, first_obj.__class__.__name__, lookup))
+
+ if level == len(attrs) - 1 and prefetcher is None:
+ # Last one, this *must* resolve to something that supports
+ # prefetching, otherwise there is no point adding it and the
+ # developer asking for it has made a mistake.
+ raise ValueError("'%s' does not resolve to an item that supports "
+ "prefetching - this is an invalid parameter to "
+ "prefetch_related()." % lookup)
+
+ if prefetcher is not None and not is_fetched:
+ obj_list, additional_prl = prefetch_one_level(obj_list, prefetcher, attr)
+ # We need to ensure we don't keep adding lookups from the
+ # same relationships to stop infinite recursion. So, if we
+ # are already on an automatically added lookup, don't add
+ # the new lookups from relationships we've seen already.
+ if not (lookup in auto_lookups and
+ descriptor in followed_descriptors):
+ for f in additional_prl:
+ new_prl = LOOKUP_SEP.join([current_lookup, f])
+ auto_lookups.append(new_prl)
+ done_queries[current_lookup] = obj_list
+ followed_descriptors.add(descriptor)
+ else:
+ # Either a singly related object that has already been fetched
+ # (e.g. via select_related), or hopefully some other property
+ # that doesn't support prefetching but needs to be traversed.
+
+ # We replace the current list of parent objects with the list
+ # of related objects, filtering out empty or missing values so
+ # that we can continue with nullable or reverse relations.
+ new_obj_list = [] + for obj in obj_list: + try: + new_obj = getattr(obj, attr) + except exceptions.ObjectDoesNotExist: + continue + if new_obj is None: + continue + new_obj_list.append(new_obj) + obj_list = new_obj_list + + +def get_prefetcher(instance, attr): + """ + For the attribute 'attr' on the given instance, finds + an object that has a get_prefetch_queryset(). + Returns a 4 tuple containing: + (the object with get_prefetch_queryset (or None), + the descriptor object representing this relationship (or None), + a boolean that is False if the attribute was not found at all, + a boolean that is True if the attribute has already been fetched) + """ + prefetcher = None + attr_found = False + is_fetched = False + + # For singly related objects, we have to avoid getting the attribute + # from the object, as this will trigger the query. So we first try + # on the class, in order to get the descriptor object. + rel_obj_descriptor = getattr(instance.__class__, attr, None) + if rel_obj_descriptor is None: + try: + rel_obj = getattr(instance, attr) + attr_found = True + except AttributeError: + pass + else: + attr_found = True + if rel_obj_descriptor: + # singly related object, descriptor object has the + # get_prefetch_queryset() method. + if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'): + prefetcher = rel_obj_descriptor + if rel_obj_descriptor.is_cached(instance): + is_fetched = True + else: + # descriptor doesn't support prefetching, so we go ahead and get + # the attribute on the instance rather than the class to + # support many related managers + rel_obj = getattr(instance, attr) + if hasattr(rel_obj, 'get_prefetch_queryset'): + prefetcher = rel_obj + return prefetcher, rel_obj_descriptor, attr_found, is_fetched + + +def prefetch_one_level(instances, prefetcher, attname): + """ + Helper function for prefetch_related_objects + + Runs prefetches on all instances using the prefetcher object, + assigning results to relevant caches in instance. + + The prefetched objects are returned, along with any additional + prefetches that must be done due to prefetch_related lookups + found from default managers. + """ + # prefetcher must have a method get_prefetch_queryset() which takes a list + # of instances, and returns a tuple: + + # (queryset of instances of self.model that are related to passed in instances, + # callable that gets value to be matched for returned instances, + # callable that gets value to be matched for passed in instances, + # boolean that is True for singly related objects, + # cache name to assign to). + + # The 'values to be matched' must be hashable as they will be used + # in a dictionary. + + rel_qs, rel_obj_attr, instance_attr, single, cache_name =\ + prefetcher.get_prefetch_queryset(instances) + # We have to handle the possibility that the default manager itself added + # prefetch_related lookups to the QuerySet we just got back. We don't want to + # trigger the prefetch_related functionality by evaluating the query. + # Rather, we need to merge in the prefetch_related lookups. + additional_prl = getattr(rel_qs, '_prefetch_related_lookups', []) + if additional_prl: + # Don't need to clone because the manager should have given us a fresh + # instance, so we access an internal instead of using public interface + # for performance reasons. 
+ rel_qs._prefetch_related_lookups = [] + + all_related_objects = list(rel_qs) + + rel_obj_cache = {} + for rel_obj in all_related_objects: + rel_attr_val = rel_obj_attr(rel_obj) + rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) + + for obj in instances: + instance_attr_val = instance_attr(obj) + vals = rel_obj_cache.get(instance_attr_val, []) + if single: + # Need to assign to single cache on instance + setattr(obj, cache_name, vals[0] if vals else None) + else: + # Multi, attribute represents a manager with an .all() method that + # returns a QuerySet + qs = getattr(obj, attname).all() + qs._result_cache = vals + # We don't want the individual qs doing prefetch_related now, since we + # have merged this into the current work. + qs._prefetch_done = True + obj._prefetched_objects_cache[cache_name] = qs + return all_related_objects, additional_prl diff --git a/lib/python2.7/site-packages/django/db/models/query_utils.py b/lib/python2.7/site-packages/django/db/models/query_utils.py new file mode 100644 index 0000000..ee7a56a --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/query_utils.py @@ -0,0 +1,205 @@ +""" +Various data structures used in query construction. + +Factored out from django.db.models.query to avoid making the main module very +large and/or so that they can be used by other modules without getting into +circular import difficulties. +""" +from __future__ import unicode_literals + +from django.db.backends import util +from django.utils import six +from django.utils import tree + + +class InvalidQuery(Exception): + """ + The query passed to raw isn't a safe query to use with raw. + """ + pass + + +class QueryWrapper(object): + """ + A type that indicates the contents are an SQL fragment and the associate + parameters. Can be used to pass opaque data to a where-clause, for example. + """ + def __init__(self, sql, params): + self.data = sql, list(params) + + def as_sql(self, qn=None, connection=None): + return self.data + +class Q(tree.Node): + """ + Encapsulates filters as objects that can then be combined logically (using + & and |). + """ + # Connection types + AND = 'AND' + OR = 'OR' + default = AND + + def __init__(self, *args, **kwargs): + super(Q, self).__init__(children=list(args) + list(six.iteritems(kwargs))) + + def _combine(self, other, conn): + if not isinstance(other, Q): + raise TypeError(other) + obj = type(self)() + obj.connector = conn + obj.add(self, conn) + obj.add(other, conn) + return obj + + def __or__(self, other): + return self._combine(other, self.OR) + + def __and__(self, other): + return self._combine(other, self.AND) + + def __invert__(self): + obj = type(self)() + obj.add(self, self.AND) + obj.negate() + return obj + + def clone(self): + clone = self.__class__._new_instance( + children=[], connector=self.connector, negated=self.negated) + for child in self.children: + if hasattr(child, 'clone'): + clone.children.append(child.clone()) + else: + clone.children.append(child) + return clone + +class DeferredAttribute(object): + """ + A wrapper for a deferred-loading field. When the value is read from this + object the first time, the query is executed. + """ + def __init__(self, field_name, model): + self.field_name = field_name + + def __get__(self, instance, owner): + """ + Retrieves and caches the value from the datastore on the first lookup. + Returns the cached value. 
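+ For example, after qs.defer('body') the first read of instance.body
+ runs one extra query; later reads come straight from the instance's
+ __dict__ ('body' here stands for any deferred field name).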
+ """ + from django.db.models.fields import FieldDoesNotExist + non_deferred_model = instance._meta.proxy_for_model + opts = non_deferred_model._meta + + assert instance is not None + data = instance.__dict__ + if data.get(self.field_name, self) is self: + # self.field_name is the attname of the field, but only() takes the + # actual name, so we need to translate it here. + try: + f = opts.get_field_by_name(self.field_name)[0] + except FieldDoesNotExist: + f = [f for f in opts.fields + if f.attname == self.field_name][0] + name = f.name + # Let's see if the field is part of the parent chain. If so we + # might be able to reuse the already loaded value. Refs #18343. + val = self._check_parent_chain(instance, name) + if val is None: + # We use only() instead of values() here because we want the + # various data coersion methods (to_python(), etc.) to be + # called here. + val = getattr( + non_deferred_model._base_manager.only(name).using( + instance._state.db).get(pk=instance.pk), + self.field_name + ) + data[self.field_name] = val + return data[self.field_name] + + def __set__(self, instance, value): + """ + Deferred loading attributes can be set normally (which means there will + never be a database lookup involved. + """ + instance.__dict__[self.field_name] = value + + def _check_parent_chain(self, instance, name): + """ + Check if the field value can be fetched from a parent field already + loaded in the instance. This can be done if the to-be fetched + field is a primary key field. + """ + opts = instance._meta + f = opts.get_field_by_name(name)[0] + link_field = opts.get_ancestor_link(f.model) + if f.primary_key and f != link_field: + return getattr(instance, link_field.attname) + return None + + +def select_related_descend(field, restricted, requested, load_fields, reverse=False): + """ + Returns True if this field should be used to descend deeper for + select_related() purposes. Used by both the query construction code + (sql.query.fill_related_selections()) and the model instance creation code + (query.get_klass_info()). + + Arguments: + * field - the field to be checked + * restricted - a boolean field, indicating if the field list has been + manually restricted using a requested clause) + * requested - The select_related() dictionary. + * load_fields - the set of fields to be loaded on this model + * reverse - boolean, True if we are checking a reverse select related + """ + if not field.rel: + return False + if field.rel.parent_link and not reverse: + return False + if restricted: + if reverse and field.related_query_name() not in requested: + return False + if not reverse and field.name not in requested: + return False + if not restricted and field.null: + return False + if load_fields: + if field.name not in load_fields: + if restricted and field.name in requested: + raise InvalidQuery("Field %s.%s cannot be both deferred" + " and traversed using select_related" + " at the same time." % + (field.model._meta.object_name, field.name)) + return False + return True + +# This function is needed because data descriptors must be defined on a class +# object, not an instance, to have any effect. + +def deferred_class_factory(model, attrs): + """ + Returns a class object that is a copy of "model" with the specified "attrs" + being replaced with DeferredAttribute objects. The "pk_value" ties the + deferred attributes to a particular instance of the model. 
+ """ + class Meta: + proxy = True + app_label = model._meta.app_label + + # The app_cache wants a unique name for each model, otherwise the new class + # won't be created (we get an old one back). Therefore, we generate the + # name using the passed in attrs. It's OK to reuse an existing class + # object if the attrs are identical. + name = "%s_Deferred_%s" % (model.__name__, '_'.join(sorted(list(attrs)))) + name = util.truncate_name(name, 80, 32) + + overrides = dict((attr, DeferredAttribute(attr, model)) for attr in attrs) + overrides["Meta"] = Meta + overrides["__module__"] = model.__module__ + overrides["_deferred"] = True + return type(str(name), (model,), overrides) + +# The above function is also used to unpickle model instances with deferred +# fields. +deferred_class_factory.__safe_for_unpickling__ = True diff --git a/lib/python2.7/site-packages/django/db/models/related.py b/lib/python2.7/site-packages/django/db/models/related.py new file mode 100644 index 0000000..4b00dd3 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/related.py @@ -0,0 +1,67 @@ +from collections import namedtuple + +from django.utils.encoding import smart_text +from django.db.models.fields import BLANK_CHOICE_DASH + +# PathInfo is used when converting lookups (fk__somecol). The contents +# describe the relation in Model terms (model Options and Fields for both +# sides of the relation. The join_field is the field backing the relation. +PathInfo = namedtuple('PathInfo', + 'from_opts to_opts target_fields join_field ' + 'm2m direct') + +class RelatedObject(object): + def __init__(self, parent_model, model, field): + self.parent_model = parent_model + self.model = model + self.opts = model._meta + self.field = field + self.name = '%s:%s' % (self.opts.app_label, self.opts.model_name) + self.var_name = self.opts.model_name + + def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, + limit_to_currently_related=False): + """Returns choices with a default blank choices included, for use + as SelectField choices for this field. + + Analogue of django.db.models.fields.Field.get_choices, provided + initially for utilisation by RelatedFieldListFilter. + """ + first_choice = blank_choice if include_blank else [] + queryset = self.model._default_manager.all() + if limit_to_currently_related: + queryset = queryset.complex_filter( + {'%s__isnull' % self.parent_model._meta.model_name: False}) + lst = [(x._get_pk_val(), smart_text(x)) for x in queryset] + return first_choice + lst + + def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False): + # Defer to the actual field definition for db prep + return self.field.get_db_prep_lookup(lookup_type, value, + connection=connection, prepared=prepared) + + def editable_fields(self): + "Get the fields in this class that should be edited inline." + return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field] + + def __repr__(self): + return "<RelatedObject: %s related to %s>" % (self.name, self.field.name) + + def get_accessor_name(self): + # This method encapsulates the logic that decides what name to give an + # accessor descriptor that retrieves related many-to-one or + # many-to-many objects. It uses the lower-cased object_name + "_set", + # but this can be overridden with the "related_name" option. + if self.field.rel.multiple: + # If this is a symmetrical m2m relation on self, there is no reverse accessor. 
+ if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model: + return None + return self.field.rel.related_name or (self.opts.model_name + '_set') + else: + return self.field.rel.related_name or (self.opts.model_name) + + def get_cache_name(self): + return "_%s_cache" % self.get_accessor_name() + + def get_path_info(self): + return self.field.get_reverse_path_info() diff --git a/lib/python2.7/site-packages/django/db/models/signals.py b/lib/python2.7/site-packages/django/db/models/signals.py new file mode 100644 index 0000000..0782442 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/signals.py @@ -0,0 +1,18 @@ +from django.dispatch import Signal + +class_prepared = Signal(providing_args=["class"]) + +pre_init = Signal(providing_args=["instance", "args", "kwargs"], use_caching=True) +post_init = Signal(providing_args=["instance"], use_caching=True) + +pre_save = Signal(providing_args=["instance", "raw", "using", "update_fields"], + use_caching=True) +post_save = Signal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True) + +pre_delete = Signal(providing_args=["instance", "using"], use_caching=True) +post_delete = Signal(providing_args=["instance", "using"], use_caching=True) + +pre_syncdb = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"]) +post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"]) + +m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], use_caching=True) diff --git a/lib/python2.7/site-packages/django/db/models/sql/__init__.py b/lib/python2.7/site-packages/django/db/models/sql/__init__.py new file mode 100644 index 0000000..df5b74e --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/__init__.py @@ -0,0 +1,9 @@ +from __future__ import absolute_import + +from django.db.models.sql.datastructures import EmptyResultSet +from django.db.models.sql.subqueries import * +from django.db.models.sql.query import * +from django.db.models.sql.where import AND, OR + + +__all__ = ['Query', 'AND', 'OR', 'EmptyResultSet'] diff --git a/lib/python2.7/site-packages/django/db/models/sql/aggregates.py b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py new file mode 100644 index 0000000..2bd2b2f --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/aggregates.py @@ -0,0 +1,125 @@ +""" +Classes to represent the default SQL aggregate functions +""" +import copy + +from django.db.models.fields import IntegerField, FloatField + +# Fake fields used to identify aggregate types in data-conversion operations. +ordinal_aggregate_field = IntegerField() +computed_aggregate_field = FloatField() + +class Aggregate(object): + """ + Default SQL Aggregate. + """ + is_ordinal = False + is_computed = False + sql_template = '%(function)s(%(field)s)' + + def __init__(self, col, source=None, is_summary=False, **extra): + """Instantiate an SQL aggregate + + * col is a column reference describing the subject field + of the aggregate. It can be an alias, or a tuple describing + a table and column name. + * source is the underlying field or aggregate definition for + the column reference. If the aggregate is not an ordinal or + computed type, this reference is used to determine the coerced + output type of the aggregate. 
+ * extra is a dictionary of additional data to provide for the + aggregate definition + + Also utilizes the class variables: + * sql_function, the name of the SQL function that implements the + aggregate. + * sql_template, a template string that is used to render the + aggregate into SQL. + * is_ordinal, a boolean indicating if the output of this aggregate + is an integer (e.g., a count) + * is_computed, a boolean indicating if this output of this aggregate + is a computed float (e.g., an average), regardless of the input + type. + + """ + self.col = col + self.source = source + self.is_summary = is_summary + self.extra = extra + + # Follow the chain of aggregate sources back until you find an + # actual field, or an aggregate that forces a particular output + # type. This type of this field will be used to coerce values + # retrieved from the database. + tmp = self + + while tmp and isinstance(tmp, Aggregate): + if getattr(tmp, 'is_ordinal', False): + tmp = ordinal_aggregate_field + elif getattr(tmp, 'is_computed', False): + tmp = computed_aggregate_field + else: + tmp = tmp.source + + self.field = tmp + + def relabeled_clone(self, change_map): + clone = copy.copy(self) + if isinstance(self.col, (list, tuple)): + clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1]) + return clone + + def as_sql(self, qn, connection): + "Return the aggregate, rendered as SQL with parameters." + params = [] + + if hasattr(self.col, 'as_sql'): + field_name, params = self.col.as_sql(qn, connection) + elif isinstance(self.col, (list, tuple)): + field_name = '.'.join([qn(c) for c in self.col]) + else: + field_name = self.col + + substitutions = { + 'function': self.sql_function, + 'field': field_name + } + substitutions.update(self.extra) + + return self.sql_template % substitutions, params + + +class Avg(Aggregate): + is_computed = True + sql_function = 'AVG' + +class Count(Aggregate): + is_ordinal = True + sql_function = 'COUNT' + sql_template = '%(function)s(%(distinct)s%(field)s)' + + def __init__(self, col, distinct=False, **extra): + super(Count, self).__init__(col, distinct='DISTINCT ' if distinct else '', **extra) + +class Max(Aggregate): + sql_function = 'MAX' + +class Min(Aggregate): + sql_function = 'MIN' + +class StdDev(Aggregate): + is_computed = True + + def __init__(self, col, sample=False, **extra): + super(StdDev, self).__init__(col, **extra) + self.sql_function = 'STDDEV_SAMP' if sample else 'STDDEV_POP' + +class Sum(Aggregate): + sql_function = 'SUM' + +class Variance(Aggregate): + is_computed = True + + def __init__(self, col, sample=False, **extra): + super(Variance, self).__init__(col, **extra) + self.sql_function = 'VAR_SAMP' if sample else 'VAR_POP' diff --git a/lib/python2.7/site-packages/django/db/models/sql/compiler.py b/lib/python2.7/site-packages/django/db/models/sql/compiler.py new file mode 100644 index 0000000..ea7f9f4 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/compiler.py @@ -0,0 +1,1128 @@ +import datetime + +from django.conf import settings +from django.core.exceptions import FieldError +from django.db.backends.util import truncate_name +from django.db.models.constants import LOOKUP_SEP +from django.db.models.query_utils import select_related_descend, QueryWrapper +from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR, + GET_ITERATOR_CHUNK_SIZE, SelectInfo) +from django.db.models.sql.datastructures import EmptyResultSet +from django.db.models.sql.expressions import SQLEvaluator +from django.db.models.sql.query import 
get_order_dir, Query +from django.db.transaction import TransactionManagementError +from django.db.utils import DatabaseError +from django.utils import six +from django.utils.six.moves import zip +from django.utils import timezone + + +class SQLCompiler(object): + def __init__(self, query, connection, using): + self.query = query + self.connection = connection + self.using = using + self.quote_cache = {} + # When ordering a queryset with distinct on a column not part of the + # select set, the ordering column needs to be added to the select + # clause. This information is needed both in SQL construction and + # masking away the ordering selects from the returned row. + self.ordering_aliases = [] + self.ordering_params = [] + + def pre_sql_setup(self): + """ + Does any necessary class setup immediately prior to producing SQL. This + is for things that can't necessarily be done in __init__ because we + might not have all the pieces in place at that time. + # TODO: after the query has been executed, the altered state should be + # cleaned. We are not using a clone() of the query here. + """ + if not self.query.tables: + self.query.join((None, self.query.get_meta().db_table, None)) + if (not self.query.select and self.query.default_cols and not + self.query.included_inherited_models): + self.query.setup_inherited_models() + if self.query.select_related and not self.query.related_select_cols: + self.fill_related_selections() + + def quote_name_unless_alias(self, name): + """ + A wrapper around connection.ops.quote_name that doesn't quote aliases + for table names. This avoids problems with some SQL dialects that treat + quoted strings specially (e.g. PostgreSQL). + """ + if name in self.quote_cache: + return self.quote_cache[name] + if ((name in self.query.alias_map and name not in self.query.table_map) or + name in self.query.extra_select): + self.quote_cache[name] = name + return name + r = self.connection.ops.quote_name(name) + self.quote_cache[name] = r + return r + + def as_sql(self, with_limits=True, with_col_aliases=False): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + + If 'with_limits' is False, any limit/offset information is not included + in the query. + """ + if with_limits and self.query.low_mark == self.query.high_mark: + return '', () + + self.pre_sql_setup() + # After executing the query, we must get rid of any joins the query + # setup created. So, take note of alias counts before the query ran. + # However we do not want to get rid of stuff done in pre_sql_setup(), + # as the pre_sql_setup will modify query state in a way that forbids + # another run of it. + self.refcounts_before = self.query.alias_refcount.copy() + out_cols, s_params = self.get_columns(with_col_aliases) + ordering, o_params, ordering_group_by = self.get_ordering() + + distinct_fields = self.get_distinct() + + # This must come after 'select', 'ordering' and 'distinct' -- see + # docstring of get_from_clause() for details. 
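
By the time the FROM clause is rendered below, everything that can add joins has already run; that ordering is the point of the comment above. To inspect the finished product without executing anything, the compiler can be driven by hand. A minimal sketch, assuming a hypothetical Author model:

    qs = Author.objects.filter(name__startswith='A')
    compiler = qs.query.get_compiler(using='default')
    sql, params = compiler.as_sql()
    print(sql)     # e.g. SELECT "app_author"."id", ... WHERE "app_author"."name" LIKE %s
    print(params)  # (u'A%',)  -  exact SQL varies by backend
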
+ from_, f_params = self.get_from_clause() + + qn = self.quote_name_unless_alias + + where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection) + having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection) + having_group_by = self.query.having.get_cols() + params = [] + for val in six.itervalues(self.query.extra_select): + params.extend(val[1]) + + result = ['SELECT'] + + if self.query.distinct: + result.append(self.connection.ops.distinct_sql(distinct_fields)) + params.extend(o_params) + result.append(', '.join(out_cols + self.ordering_aliases)) + params.extend(s_params) + params.extend(self.ordering_params) + + result.append('FROM') + result.extend(from_) + params.extend(f_params) + + if where: + result.append('WHERE %s' % where) + params.extend(w_params) + + grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by) + if grouping: + if distinct_fields: + raise NotImplementedError( + "annotate() + distinct(fields) not implemented.") + if not ordering: + ordering = self.connection.ops.force_no_ordering() + result.append('GROUP BY %s' % ', '.join(grouping)) + params.extend(gb_params) + + if having: + result.append('HAVING %s' % having) + params.extend(h_params) + + if ordering: + result.append('ORDER BY %s' % ', '.join(ordering)) + + if with_limits: + if self.query.high_mark is not None: + result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark)) + if self.query.low_mark: + if self.query.high_mark is None: + val = self.connection.ops.no_limit_value() + if val: + result.append('LIMIT %d' % val) + result.append('OFFSET %d' % self.query.low_mark) + + if self.query.select_for_update and self.connection.features.has_select_for_update: + if self.connection.get_autocommit(): + raise TransactionManagementError("select_for_update cannot be used outside of a transaction.") + + # If we've been asked for a NOWAIT query but the backend does not support it, + # raise a DatabaseError otherwise we could get an unexpected deadlock. + nowait = self.query.select_for_update_nowait + if nowait and not self.connection.features.has_select_for_update_nowait: + raise DatabaseError('NOWAIT is not supported on this database backend.') + result.append(self.connection.ops.for_update_sql(nowait=nowait)) + + # Finally do cleanup - get rid of the joins we created above. + self.query.reset_refcounts(self.refcounts_before) + + return ' '.join(result), tuple(params) + + def as_nested_sql(self): + """ + Perform the same functionality as the as_sql() method, returning an + SQL string and parameters. However, the alias prefixes are bumped + beforehand (in a copy -- the current query isn't changed), and any + ordering is removed if the query is unsliced. + + Used when nesting this query inside another. + """ + obj = self.query.clone() + if obj.low_mark == 0 and obj.high_mark is None: + # If there is no slicing in use, then we can safely drop all ordering + obj.clear_ordering(True) + obj.bump_prefix() + return obj.get_compiler(connection=self.connection).as_sql() + + def get_columns(self, with_aliases=False): + """ + Returns the list of columns to use in the select statement, as well as + a list any extra parameters that need to be included. If no columns + have been specified, returns all columns relating to fields in the + model. + + If 'with_aliases' is true, any column names that are duplicated + (without the table names) are given unique aliases. This is needed in + some cases to avoid ambiguity with nested queries. 
+ """ + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)] + params = [] + aliases = set(self.query.extra_select.keys()) + if with_aliases: + col_aliases = aliases.copy() + else: + col_aliases = set() + if self.query.select: + only_load = self.deferred_to_columns() + for col, _ in self.query.select: + if isinstance(col, (list, tuple)): + alias, column = col + table = self.query.alias_map[alias].table_name + if table in only_load and column not in only_load[table]: + continue + r = '%s.%s' % (qn(alias), qn(column)) + if with_aliases: + if col[1] in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s AS %s' % (r, c_alias)) + aliases.add(c_alias) + col_aliases.add(c_alias) + else: + result.append('%s AS %s' % (r, qn2(col[1]))) + aliases.add(r) + col_aliases.add(col[1]) + else: + result.append(r) + aliases.add(r) + col_aliases.add(col[1]) + else: + col_sql, col_params = col.as_sql(qn, self.connection) + result.append(col_sql) + params.extend(col_params) + + if hasattr(col, 'alias'): + aliases.add(col.alias) + col_aliases.add(col.alias) + + elif self.query.default_cols: + cols, new_aliases = self.get_default_columns(with_aliases, + col_aliases) + result.extend(cols) + aliases.update(new_aliases) + + max_name_length = self.connection.ops.max_name_length() + for alias, aggregate in self.query.aggregate_select.items(): + agg_sql, agg_params = aggregate.as_sql(qn, self.connection) + if alias is None: + result.append(agg_sql) + else: + result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length)))) + params.extend(agg_params) + + for (table, col), _ in self.query.related_select_cols: + r = '%s.%s' % (qn(table), qn(col)) + if with_aliases and col in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s AS %s' % (r, c_alias)) + aliases.add(c_alias) + col_aliases.add(c_alias) + else: + result.append(r) + aliases.add(r) + col_aliases.add(col) + + self._select_aliases = aliases + return result, params + + def get_default_columns(self, with_aliases=False, col_aliases=None, + start_alias=None, opts=None, as_pairs=False, from_parent=None): + """ + Computes the default columns for selecting every field in the base + model. Will sometimes be called to pull in related models (e.g. via + select_related), in which case "opts" and "start_alias" will be given + to provide a starting point for the traversal. + + Returns a list of strings, quoted appropriately for use in SQL + directly, as well as a set of aliases used in the select statement (if + 'as_pairs' is True, returns a list of (alias, col_name) pairs instead + of strings as the first component and None as the second component). + """ + result = [] + if opts is None: + opts = self.query.get_meta() + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + aliases = set() + only_load = self.deferred_to_columns() + if not start_alias: + start_alias = self.query.get_initial_alias() + # The 'seen_models' is used to optimize checking the needed parent + # alias for a given field. This also includes None -> start_alias to + # be used by local fields. + seen_models = {None: start_alias} + + for field, model in opts.get_concrete_fields_with_model(): + if from_parent and model is not None and issubclass(from_parent, model): + # Avoid loading data for already loaded parents. 
+ continue + alias = self.query.join_parent_model(opts, model, start_alias, + seen_models) + table = self.query.alias_map[alias].table_name + if table in only_load and field.column not in only_load[table]: + continue + if as_pairs: + result.append((alias, field)) + aliases.add(alias) + continue + if with_aliases and field.column in col_aliases: + c_alias = 'Col%d' % len(col_aliases) + result.append('%s.%s AS %s' % (qn(alias), + qn2(field.column), c_alias)) + col_aliases.add(c_alias) + aliases.add(c_alias) + else: + r = '%s.%s' % (qn(alias), qn2(field.column)) + result.append(r) + aliases.add(r) + if with_aliases: + col_aliases.add(field.column) + return result, aliases + + def get_distinct(self): + """ + Returns a quoted list of fields to use in DISTINCT ON part of the query. + + Note that this method can alter the tables in the query, and thus it + must be called before get_from_clause(). + """ + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + result = [] + opts = self.query.get_meta() + + for name in self.query.distinct_fields: + parts = name.split(LOOKUP_SEP) + field, cols, alias, _, _ = self._setup_joins(parts, opts, None) + cols, alias = self._final_join_removal(cols, alias) + for col in cols: + result.append("%s.%s" % (qn(alias), qn2(col))) + return result + + def get_ordering(self): + """ + Returns a tuple containing a list representing the SQL elements in the + "order by" clause, and the list of SQL elements that need to be added + to the GROUP BY clause as a result of the ordering. + + Also sets the ordering_aliases attribute on this instance to a list of + extra aliases needed in the select. + + Determining the ordering SQL can change the tables we need to include, + so this should be run *before* get_from_clause(). + """ + if self.query.extra_order_by: + ordering = self.query.extra_order_by + elif not self.query.default_ordering: + ordering = self.query.order_by + else: + ordering = (self.query.order_by + or self.query.get_meta().ordering + or []) + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + distinct = self.query.distinct + select_aliases = self._select_aliases + result = [] + group_by = [] + ordering_aliases = [] + if self.query.standard_ordering: + asc, desc = ORDER_DIR['ASC'] + else: + asc, desc = ORDER_DIR['DESC'] + + # It's possible, due to model inheritance, that normal usage might try + # to include the same field more than once in the ordering. We track + # the table/column pairs we use and discard any after the first use. + processed_pairs = set() + + params = [] + ordering_params = [] + for pos, field in enumerate(ordering): + if field == '?': + result.append(self.connection.ops.random_function_sql()) + continue + if isinstance(field, int): + if field < 0: + order = desc + field = -field + else: + order = asc + result.append('%s %s' % (field, order)) + group_by.append((str(field), [])) + continue + col, order = get_order_dir(field, asc) + if col in self.query.aggregate_select: + result.append('%s %s' % (qn(col), order)) + continue + if '.' in field: + # This came in through an extra(order_by=...) addition. Pass it + # on verbatim. 
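
get_distinct() above feeds connection.ops.distinct_sql() in as_sql(); on PostgreSQL, the one core backend that supports distinct_fields, this renders as DISTINCT ON. A sketch, assuming a hypothetical Entry model:

    qs = Entry.objects.order_by('slug', '-pub_date').distinct('slug')
    print(str(qs.query))
    # SELECT DISTINCT ON ("app_entry"."slug") "app_entry"."id", ...
    # FROM "app_entry"
    # ORDER BY "app_entry"."slug" ASC, "app_entry"."pub_date" DESC
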
+ table, col = col.split('.', 1) + if (table, col) not in processed_pairs: + elt = '%s.%s' % (qn(table), col) + processed_pairs.add((table, col)) + if not distinct or elt in select_aliases: + result.append('%s %s' % (elt, order)) + group_by.append((elt, [])) + elif get_order_dir(field)[0] not in self.query.extra: + # 'col' is of the form 'field' or 'field1__field2' or + # '-field1__field2__field', etc. + for table, cols, order in self.find_ordering_name(field, + self.query.get_meta(), default_order=asc): + for col in cols: + if (table, col) not in processed_pairs: + elt = '%s.%s' % (qn(table), qn2(col)) + processed_pairs.add((table, col)) + if distinct and elt not in select_aliases: + ordering_aliases.append(elt) + result.append('%s %s' % (elt, order)) + group_by.append((elt, [])) + else: + elt = qn2(col) + if col not in self.query.extra_select: + sql = "(%s) AS %s" % (self.query.extra[col][0], elt) + ordering_aliases.append(sql) + ordering_params.extend(self.query.extra[col][1]) + else: + if distinct and col not in select_aliases: + ordering_aliases.append(elt) + ordering_params.extend(params) + result.append('%s %s' % (elt, order)) + group_by.append(self.query.extra[col]) + self.ordering_aliases = ordering_aliases + self.ordering_params = ordering_params + return result, params, group_by + + def find_ordering_name(self, name, opts, alias=None, default_order='ASC', + already_seen=None): + """ + Returns the table alias (the name might be ambiguous, the alias will + not be) and column name for ordering by the given 'name' parameter. + The 'name' is of the form 'field1__field2__...__fieldN'. + """ + name, order = get_order_dir(name, default_order) + pieces = name.split(LOOKUP_SEP) + field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias) + + # If we get to this point and the field is a relation to another model, + # append the default ordering for that model. + if field.rel and len(joins) > 1 and opts.ordering: + # Firstly, avoid infinite loops. + if not already_seen: + already_seen = set() + join_tuple = tuple([self.query.alias_map[j].table_name for j in joins]) + if join_tuple in already_seen: + raise FieldError('Infinite loop caused by ordering.') + already_seen.add(join_tuple) + + results = [] + for item in opts.ordering: + results.extend(self.find_ordering_name(item, opts, alias, + order, already_seen)) + return results + cols, alias = self._final_join_removal(cols, alias) + return [(alias, cols, order)] + + def _setup_joins(self, pieces, opts, alias): + """ + A helper method for get_ordering and get_distinct. This method will + call query.setup_joins, handle refcounts and then promote the joins. + + Note that get_ordering and get_distinct must produce same target + columns on same input, as the prefixes of get_ordering and get_distinct + must match. Executing SQL where this is not true is an error. + """ + if not alias: + alias = self.query.get_initial_alias() + field, targets, opts, joins, _ = self.query.setup_joins( + pieces, opts, alias) + # We will later on need to promote those joins that were added to the + # query afresh above. + joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2] + alias = joins[-1] + cols = [target.column for target in targets] + if not field.rel: + # To avoid inadvertent trimming of a necessary alias, use the + # refcount to show that we are referencing a non-relation field on + # the model. + self.query.ref_alias(alias) + + # Must use left outer joins for nullable fields and their relations. 
+ # Ordering or distinct must not affect the returned set, and INNER + # JOINS for nullable fields could do this. + self.query.promote_joins(joins_to_promote) + return field, cols, alias, joins, opts + + def _final_join_removal(self, cols, alias): + """ + A helper method for get_distinct and get_ordering. This method will + trim extra not-needed joins from the tail of the join chain. + + This is very similar to what is done in trim_joins, but we will + trim LEFT JOINS here. It would be a good idea to consolidate this + method and query.trim_joins(). + """ + if alias: + while 1: + join = self.query.alias_map[alias] + lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols]) + if set(cols) != set(rhs_cols): + break + + cols = [lhs_cols[rhs_cols.index(col)] for col in cols] + self.query.unref_alias(alias) + alias = join.lhs_alias + return cols, alias + + def get_from_clause(self): + """ + Returns a list of strings that are joined together to go after the + "FROM" part of the query, as well as a list any extra parameters that + need to be included. Sub-classes, can override this to create a + from-clause via a "select". + + This should only be called after any SQL construction methods that + might change the tables we need. This means the select columns, + ordering and distinct must be done first. + """ + result = [] + qn = self.quote_name_unless_alias + qn2 = self.connection.ops.quote_name + first = True + from_params = [] + for alias in self.query.tables: + if not self.query.alias_refcount[alias]: + continue + try: + name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias] + except KeyError: + # Extra tables can end up in self.tables, but not in the + # alias_map if they aren't in a join. That's OK. We skip them. + continue + alias_str = '' if alias == name else (' %s' % alias) + if join_type and not first: + extra_cond = join_field.get_extra_restriction( + self.query.where_class, alias, lhs) + if extra_cond: + extra_sql, extra_params = extra_cond.as_sql( + qn, self.connection) + extra_sql = 'AND (%s)' % extra_sql + from_params.extend(extra_params) + else: + extra_sql = "" + result.append('%s %s%s ON (' + % (join_type, qn(name), alias_str)) + for index, (lhs_col, rhs_col) in enumerate(join_cols): + if index != 0: + result.append(' AND ') + result.append('%s.%s = %s.%s' % + (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col))) + result.append('%s)' % extra_sql) + else: + connector = '' if first else ', ' + result.append('%s%s%s' % (connector, qn(name), alias_str)) + first = False + for t in self.query.extra_tables: + alias, unused = self.query.table_alias(t) + # Only add the alias if it's not already present (the table_alias() + # calls increments the refcount, so an alias refcount of one means + # this is the only reference. + if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: + connector = '' if first else ', ' + result.append('%s%s' % (connector, qn(alias))) + first = False + return result, from_params + + def get_grouping(self, having_group_by, ordering_group_by): + """ + Returns a tuple representing the SQL elements in the "group by" clause. + """ + qn = self.quote_name_unless_alias + result, params = [], [] + if self.query.group_by is not None: + select_cols = self.query.select + self.query.related_select_cols + # Just the column, not the fields. 
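
get_from_clause() above also appends any extra_tables registered via extra(); they arrive comma-joined after the real joins, which is why the docstring insists on running it last. A sketch, assuming a hypothetical Book model and a pre-existing app_tag table:

    qs = Book.objects.extra(tables=['app_tag'],
                            where=['app_tag.book_id = app_book.id'])
    print(str(qs.query))
    # roughly: SELECT ... FROM "app_book" , "app_tag"
    #          WHERE (app_tag.book_id = app_book.id)
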
+ select_cols = [s[0] for s in select_cols] + if (len(self.query.get_meta().concrete_fields) == len(self.query.select) + and self.connection.features.allows_group_by_pk): + self.query.group_by = [ + (self.query.get_meta().db_table, self.query.get_meta().pk.column) + ] + select_cols = [] + seen = set() + cols = self.query.group_by + having_group_by + select_cols + for col in cols: + col_params = () + if isinstance(col, (list, tuple)): + sql = '%s.%s' % (qn(col[0]), qn(col[1])) + elif hasattr(col, 'as_sql'): + sql, col_params = col.as_sql(qn, self.connection) + else: + sql = '(%s)' % str(col) + if sql not in seen: + result.append(sql) + params.extend(col_params) + seen.add(sql) + + # Still, we need to add all stuff in ordering (except if the backend can + # group by just by PK). + if ordering_group_by and not self.connection.features.allows_group_by_pk: + for order, order_params in ordering_group_by: + # Even if we have seen the same SQL string, it might have + # different params, so, we add same SQL in "has params" case. + if order not in seen or order_params: + result.append(order) + params.extend(order_params) + seen.add(order) + + # Unconditionally add the extra_select items. + for extra_select, extra_params in self.query.extra_select.values(): + sql = '(%s)' % str(extra_select) + result.append(sql) + params.extend(extra_params) + + return result, params + + def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, + requested=None, restricted=None, nullable=None): + """ + Fill in the information needed for a select_related query. The current + depth is measured as the number of connections away from the root model + (for example, cur_depth=1 means we are looking at models with direct + connections to the root model). + """ + if not restricted and self.query.max_depth and cur_depth > self.query.max_depth: + # We've recursed far enough; bail out. + return + + if not opts: + opts = self.query.get_meta() + root_alias = self.query.get_initial_alias() + self.query.related_select_cols = [] + only_load = self.query.get_loaded_field_names() + + # Setup for the case when only particular related fields should be + # included in the related selection. + if requested is None: + if isinstance(self.query.select_related, dict): + requested = self.query.select_related + restricted = True + else: + restricted = False + + for f, model in opts.get_fields_with_model(): + # The get_fields_with_model() returns None for fields that live + # in the field's local model. So, for those fields we want to use + # the f.model - that is the field's local model. 
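
get_grouping() above is what an annotate() call ultimately exercises: every plainly selected column must reappear in GROUP BY, unless allows_group_by_pk lets the backend collapse the list to the primary key. A sketch, assuming a hypothetical Author model with a reverse book relation:

    from django.db.models import Count

    qs = Author.objects.annotate(num_books=Count('book'))
    print(str(qs.query))
    # SELECT "app_author"."id", "app_author"."name",
    #        COUNT("app_book"."id") AS "num_books"
    # FROM "app_author"
    # LEFT OUTER JOIN "app_book" ON ("app_author"."id" = "app_book"."author_id")
    # GROUP BY "app_author"."id", "app_author"."name"
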
+ field_model = model or f.model + if not select_related_descend(f, restricted, requested, + only_load.get(field_model)): + continue + promote = nullable or f.null + _, _, _, joins, _ = self.query.setup_joins( + [f.name], opts, root_alias, outer_if_first=promote) + alias = joins[-1] + columns, _ = self.get_default_columns(start_alias=alias, + opts=f.rel.to._meta, as_pairs=True) + self.query.related_select_cols.extend( + SelectInfo((col[0], col[1].column), col[1]) for col in columns) + if restricted: + next = requested.get(f.name, {}) + else: + next = False + new_nullable = f.null or promote + self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1, + next, restricted, new_nullable) + + if restricted: + related_fields = [ + (o.field, o.model) + for o in opts.get_all_related_objects() + if o.field.unique + ] + for f, model in related_fields: + if not select_related_descend(f, restricted, requested, + only_load.get(model), reverse=True): + continue + + _, _, _, joins, _ = self.query.setup_joins( + [f.related_query_name()], opts, root_alias, outer_if_first=True) + alias = joins[-1] + from_parent = (opts.model if issubclass(model, opts.model) + else None) + columns, _ = self.get_default_columns(start_alias=alias, + opts=model._meta, as_pairs=True, from_parent=from_parent) + self.query.related_select_cols.extend( + SelectInfo((col[0], col[1].column), col[1]) for col in columns) + next = requested.get(f.related_query_name(), {}) + # Use True here because we are looking at the _reverse_ side of + # the relation, which is always nullable. + new_nullable = True + self.fill_related_selections(model._meta, alias, cur_depth + 1, + next, restricted, new_nullable) + + def deferred_to_columns(self): + """ + Converts the self.deferred_loading data structure to mapping of table + names to sets of column names which are to be loaded. Returns the + dictionary. + """ + columns = {} + self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb) + return columns + + def results_iter(self): + """ + Returns an iterator over the results from executing this query. + """ + resolve_columns = hasattr(self, 'resolve_columns') + fields = None + has_aggregate_select = bool(self.query.aggregate_select) + for rows in self.execute_sql(MULTI): + for row in rows: + if has_aggregate_select: + loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select + aggregate_start = len(self.query.extra_select) + len(loaded_fields) + aggregate_end = aggregate_start + len(self.query.aggregate_select) + if resolve_columns: + if fields is None: + # We only set this up here because + # related_select_cols isn't populated until + # execute_sql() has been called. + + # We also include types of fields of related models that + # will be included via select_related() for the benefit + # of MySQL/MySQLdb when boolean fields are involved + # (#15040). + + # This code duplicates the logic for the order of fields + # found in get_columns(). It would be nice to clean this up. + if self.query.select: + fields = [f.field for f in self.query.select] + elif self.query.default_cols: + fields = self.query.get_meta().concrete_fields + else: + fields = [] + fields = fields + [f.field for f in self.query.related_select_cols] + + # If the field was deferred, exclude it from being passed + # into `resolve_columns` because it wasn't selected. 
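
fill_related_selections() above is the heart of select_related(): instead of issuing follow-up queries, it widens the select list so related rows come back alongside the base row. A sketch, assuming hypothetical Book/Author models:

    book = Book.objects.select_related('author').get(pk=1)
    print(book.author.name)  # no second query; the author columns
                             # arrived in the same row via related_select_cols
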
+ only_load = self.deferred_to_columns() + if only_load: + fields = [f for f in fields if f.model._meta.db_table not in only_load or + f.column in only_load[f.model._meta.db_table]] + if has_aggregate_select: + # pad None in to fields for aggregates + fields = fields[:aggregate_start] + [ + None for x in range(0, aggregate_end - aggregate_start) + ] + fields[aggregate_start:] + row = self.resolve_columns(row, fields) + + if has_aggregate_select: + row = tuple(row[:aggregate_start]) + tuple([ + self.query.resolve_aggregate(value, aggregate, self.connection) + for (alias, aggregate), value + in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end]) + ]) + tuple(row[aggregate_end:]) + + yield row + + def execute_sql(self, result_type=MULTI): + """ + Run the query against the database and returns the result(s). The + return value is a single data item if result_type is SINGLE, or an + iterator over the results if the result_type is MULTI. + + result_type is either MULTI (use fetchmany() to retrieve all rows), + SINGLE (only retrieve a single row), or None. In this last case, the + cursor is returned if any query is executed, since it's used by + subclasses such as InsertQuery). It's possible, however, that no query + is needed, as the filters describe an empty set. In that case, None is + returned, to avoid any unnecessary database interaction. + """ + try: + sql, params = self.as_sql() + if not sql: + raise EmptyResultSet + except EmptyResultSet: + if result_type == MULTI: + return iter([]) + else: + return + + cursor = self.connection.cursor() + cursor.execute(sql, params) + + if not result_type: + return cursor + if result_type == SINGLE: + if self.ordering_aliases: + return cursor.fetchone()[:-len(self.ordering_aliases)] + return cursor.fetchone() + + # The MULTI case. + if self.ordering_aliases: + result = order_modified_iter(cursor, len(self.ordering_aliases), + self.connection.features.empty_fetchmany_value) + else: + result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), + self.connection.features.empty_fetchmany_value) + if not self.connection.features.can_use_chunked_reads: + # If we are using non-chunked reads, we return the same data + # structure as normally, but ensure it is all read into memory + # before going any further. + return list(result) + return result + + def as_subquery_condition(self, alias, columns, qn): + qn2 = self.connection.ops.quote_name + if len(columns) == 1: + sql, params = self.as_sql() + return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params + + for index, select_col in enumerate(self.query.select): + lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1])) + rhs = '%s.%s' % (qn(alias), qn2(columns[index])) + self.query.where.add( + QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND') + + sql, params = self.as_sql() + return 'EXISTS (%s)' % sql, params + + +class SQLInsertCompiler(SQLCompiler): + + def __init__(self, *args, **kwargs): + self.return_id = False + super(SQLInsertCompiler, self).__init__(*args, **kwargs) + + def placeholder(self, field, val): + if field is None: + # A field value of None means the value is raw. + return val + elif hasattr(field, 'get_placeholder'): + # Some fields (e.g. geo fields) need special munging before + # they can be inserted. 
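
execute_sql() above is the single funnel through which every compiler subclass touches the cursor: MULTI yields fetchmany() chunks, SINGLE returns one row, and None hands back the raw cursor. A sketch of driving it directly, assuming a hypothetical Author model:

    from django.db.models.sql.constants import MULTI

    compiler = Author.objects.all().query.get_compiler(using='default')
    for chunk in compiler.execute_sql(MULTI):
        for row in chunk:  # rows arrive in GET_ITERATOR_CHUNK_SIZE batches
            print(row)
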
+ return field.get_placeholder(val, self.connection) + else: + # Return the common case for the placeholder + return '%s' + + def as_sql(self): + # We don't need quote_name_unless_alias() here, since these are all + # going to be column names (so we can avoid the extra overhead). + qn = self.connection.ops.quote_name + opts = self.query.get_meta() + result = ['INSERT INTO %s' % qn(opts.db_table)] + + has_fields = bool(self.query.fields) + fields = self.query.fields if has_fields else [opts.pk] + result.append('(%s)' % ', '.join([qn(f.column) for f in fields])) + + if has_fields: + params = values = [ + [ + f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection) + for f in fields + ] + for obj in self.query.objs + ] + else: + values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs] + params = [[]] + fields = [None] + can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and + not self.return_id and self.connection.features.has_bulk_insert) + + if can_bulk: + placeholders = [["%s"] * len(fields)] + else: + placeholders = [ + [self.placeholder(field, v) for field, v in zip(fields, val)] + for val in values + ] + # Oracle Spatial needs to remove some values due to #10888 + params = self.connection.ops.modify_insert_params(placeholders, params) + if self.return_id and self.connection.features.can_return_id_from_insert: + params = params[0] + col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column)) + result.append("VALUES (%s)" % ", ".join(placeholders[0])) + r_fmt, r_params = self.connection.ops.return_insert_id() + # Skip empty r_fmt to allow subclasses to customize behaviour for + # 3rd party backends. Refs #19096. + if r_fmt: + result.append(r_fmt % col) + params += r_params + return [(" ".join(result), tuple(params))] + if can_bulk: + result.append(self.connection.ops.bulk_insert_sql(fields, len(values))) + return [(" ".join(result), tuple([v for val in values for v in val]))] + else: + return [ + (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) + for p, vals in zip(placeholders, params) + ] + + def execute_sql(self, return_id=False): + assert not (return_id and len(self.query.objs) != 1) + self.return_id = return_id + cursor = self.connection.cursor() + for sql, params in self.as_sql(): + cursor.execute(sql, params) + if not (return_id and cursor): + return + if self.connection.features.can_return_id_from_insert: + return self.connection.ops.fetch_returned_insert_id(cursor) + return self.connection.ops.last_insert_id(cursor, + self.query.get_meta().db_table, self.query.get_meta().pk.column) + + +class SQLDeleteCompiler(SQLCompiler): + def as_sql(self): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + """ + assert len(self.query.tables) == 1, \ + "Can only delete from one table at a time." + qn = self.quote_name_unless_alias + result = ['DELETE FROM %s' % qn(self.query.tables[0])] + where, params = self.query.where.as_sql(qn=qn, connection=self.connection) + if where: + result.append('WHERE %s' % where) + return ' '.join(result), tuple(params) + +class SQLUpdateCompiler(SQLCompiler): + def as_sql(self): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. 
+ """ + self.pre_sql_setup() + if not self.query.values: + return '', () + table = self.query.tables[0] + qn = self.quote_name_unless_alias + result = ['UPDATE %s' % qn(table)] + result.append('SET') + values, update_params = [], [] + for field, model, val in self.query.values: + if hasattr(val, 'prepare_database_save'): + val = val.prepare_database_save(field) + else: + val = field.get_db_prep_save(val, connection=self.connection) + + # Getting the placeholder for the field. + if hasattr(field, 'get_placeholder'): + placeholder = field.get_placeholder(val, self.connection) + else: + placeholder = '%s' + + if hasattr(val, 'evaluate'): + val = SQLEvaluator(val, self.query, allow_joins=False) + name = field.column + if hasattr(val, 'as_sql'): + sql, params = val.as_sql(qn, self.connection) + values.append('%s = %s' % (qn(name), sql)) + update_params.extend(params) + elif val is not None: + values.append('%s = %s' % (qn(name), placeholder)) + update_params.append(val) + else: + values.append('%s = NULL' % qn(name)) + if not values: + return '', () + result.append(', '.join(values)) + where, params = self.query.where.as_sql(qn=qn, connection=self.connection) + if where: + result.append('WHERE %s' % where) + return ' '.join(result), tuple(update_params + params) + + def execute_sql(self, result_type): + """ + Execute the specified update. Returns the number of rows affected by + the primary update query. The "primary update query" is the first + non-empty query that is executed. Row counts for any subsequent, + related queries are not available. + """ + cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) + rows = cursor.rowcount if cursor else 0 + is_empty = cursor is None + del cursor + for query in self.query.get_related_updates(): + aux_rows = query.get_compiler(self.using).execute_sql(result_type) + if is_empty: + rows = aux_rows + is_empty = False + return rows + + def pre_sql_setup(self): + """ + If the update depends on results from other tables, we need to do some + munging of the "where" conditions to match the format required for + (portable) SQL updates. That is done here. + + Further, if we are going to be running multiple updates, we pull out + the id values to update at this point so that they don't change as a + result of the progressive updates. + """ + self.query.select_related = False + self.query.clear_ordering(True) + super(SQLUpdateCompiler, self).pre_sql_setup() + count = self.query.count_active_tables() + if not self.query.related_updates and count == 1: + return + + # We need to use a sub-select in the where clause to filter on things + # from other tables. + query = self.query.clone(klass=Query) + query.bump_prefix() + query.extra = {} + query.select = [] + query.add_fields([query.get_meta().pk.name]) + # Recheck the count - it is possible that fiddling with the select + # fields above removes tables from the query. Refs #18304. + count = query.count_active_tables() + if not self.query.related_updates and count == 1: + return + + must_pre_select = count > 1 and not self.connection.features.update_can_self_select + + # Now we adjust the current query: reset the where clause and get rid + # of all the tables we don't need (since they're in the sub-select). + self.query.where = self.query.where_class() + if self.query.related_updates or must_pre_select: + # Either we're using the idents in multiple update queries (so + # don't want them to change), or the db backend doesn't support + # selecting from the updating table (e.g. MySQL). 
+ idents = [] + for rows in query.get_compiler(self.using).execute_sql(MULTI): + idents.extend([r[0] for r in rows]) + self.query.add_filter(('pk__in', idents)) + self.query.related_ids = idents + else: + # The fast path. Filters and updates in one query. + self.query.add_filter(('pk__in', query)) + for alias in self.query.tables[1:]: + self.query.alias_refcount[alias] = 0 + +class SQLAggregateCompiler(SQLCompiler): + def as_sql(self, qn=None): + """ + Creates the SQL for this query. Returns the SQL string and list of + parameters. + """ + if qn is None: + qn = self.quote_name_unless_alias + + sql, params = [], [] + for aggregate in self.query.aggregate_select.values(): + agg_sql, agg_params = aggregate.as_sql(qn, self.connection) + sql.append(agg_sql) + params.extend(agg_params) + sql = ', '.join(sql) + params = tuple(params) + + sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery) + params = params + self.query.sub_params + return sql, params + +class SQLDateCompiler(SQLCompiler): + def results_iter(self): + """ + Returns an iterator over the results from executing this query. + """ + resolve_columns = hasattr(self, 'resolve_columns') + if resolve_columns: + from django.db.models.fields import DateField + fields = [DateField()] + else: + from django.db.backends.util import typecast_date + needs_string_cast = self.connection.features.needs_datetime_string_cast + + offset = len(self.query.extra_select) + for rows in self.execute_sql(MULTI): + for row in rows: + date = row[offset] + if resolve_columns: + date = self.resolve_columns(row, fields)[offset] + elif needs_string_cast: + date = typecast_date(str(date)) + if isinstance(date, datetime.datetime): + date = date.date() + yield date + +class SQLDateTimeCompiler(SQLCompiler): + def results_iter(self): + """ + Returns an iterator over the results from executing this query. + """ + resolve_columns = hasattr(self, 'resolve_columns') + if resolve_columns: + from django.db.models.fields import DateTimeField + fields = [DateTimeField()] + else: + from django.db.backends.util import typecast_timestamp + needs_string_cast = self.connection.features.needs_datetime_string_cast + + offset = len(self.query.extra_select) + for rows in self.execute_sql(MULTI): + for row in rows: + datetime = row[offset] + if resolve_columns: + datetime = self.resolve_columns(row, fields)[offset] + elif needs_string_cast: + datetime = typecast_timestamp(str(datetime)) + # Datetimes are artifically returned in UTC on databases that + # don't support time zone. Restore the zone used in the query. + if settings.USE_TZ: + if datetime is None: + raise ValueError("Database returned an invalid value " + "in QuerySet.datetimes(). Are time zone " + "definitions for your database and pytz installed?") + datetime = datetime.replace(tzinfo=None) + datetime = timezone.make_aware(datetime, self.query.tzinfo) + yield datetime + +def order_modified_iter(cursor, trim, sentinel): + """ + Yields blocks of rows from a cursor. We use this iterator in the special + case when extra output columns have been added to support ordering + requirements. We must trim those extra columns before anything else can use + the results, since they're only needed to make the SQL valid. 
+ """ + for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), + sentinel): + yield [r[:-trim] for r in rows] diff --git a/lib/python2.7/site-packages/django/db/models/sql/constants.py b/lib/python2.7/site-packages/django/db/models/sql/constants.py new file mode 100644 index 0000000..904f7b2 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/constants.py @@ -0,0 +1,41 @@ +""" +Constants specific to the SQL storage portion of the ORM. +""" + +from collections import namedtuple +import re + +# Valid query types (a set is used for speedy lookups). These are (currently) +# considered SQL-specific; other storage systems may choose to use different +# lookup types. +QUERY_TERMS = set([ + 'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in', + 'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year', + 'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search', + 'regex', 'iregex', +]) + +# Size of each "chunk" for get_iterator calls. +# Larger values are slightly faster at the expense of more storage space. +GET_ITERATOR_CHUNK_SIZE = 100 + +# Namedtuples for sql.* internal use. + +# Join lists (indexes into the tuples that are values in the alias_map +# dictionary in the Query class). +JoinInfo = namedtuple('JoinInfo', + 'table_name rhs_alias join_type lhs_alias ' + 'join_cols nullable join_field') + +# Pairs of column clauses to select, and (possibly None) field for the clause. +SelectInfo = namedtuple('SelectInfo', 'col field') + +# How many results to expect from a cursor.execute call +MULTI = 'multi' +SINGLE = 'single' + +ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$') +ORDER_DIR = { + 'ASC': ('ASC', 'DESC'), + 'DESC': ('DESC', 'ASC'), +} diff --git a/lib/python2.7/site-packages/django/db/models/sql/datastructures.py b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py new file mode 100644 index 0000000..daaabbe --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/datastructures.py @@ -0,0 +1,62 @@ +""" +Useful auxilliary data structures for query construction. Not useful outside +the SQL domain. +""" + +class EmptyResultSet(Exception): + pass + +class MultiJoin(Exception): + """ + Used by join construction code to indicate the point at which a + multi-valued join was attempted (if the caller wants to treat that + exceptionally). + """ + def __init__(self, names_pos, path_with_names): + self.level = names_pos + # The path travelled, this includes the path to the multijoin. + self.names_with_path = path_with_names + +class Empty(object): + pass + +class RawValue(object): + def __init__(self, value): + self.value = value + +class Date(object): + """ + Add a date selection column. + """ + def __init__(self, col, lookup_type): + self.col = col + self.lookup_type = lookup_type + + def relabeled_clone(self, change_map): + return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1])) + + def as_sql(self, qn, connection): + if isinstance(self.col, (list, tuple)): + col = '%s.%s' % tuple([qn(c) for c in self.col]) + else: + col = self.col + return connection.ops.date_trunc_sql(self.lookup_type, col), [] + +class DateTime(object): + """ + Add a datetime selection column. 
+ """ + def __init__(self, col, lookup_type, tzname): + self.col = col + self.lookup_type = lookup_type + self.tzname = tzname + + def relabeled_clone(self, change_map): + return self.__class__((change_map.get(self.col[0], self.col[0]), self.col[1])) + + def as_sql(self, qn, connection): + if isinstance(self.col, (list, tuple)): + col = '%s.%s' % tuple([qn(c) for c in self.col]) + else: + col = self.col + return connection.ops.datetime_trunc_sql(self.lookup_type, col, self.tzname) diff --git a/lib/python2.7/site-packages/django/db/models/sql/expressions.py b/lib/python2.7/site-packages/django/db/models/sql/expressions.py new file mode 100644 index 0000000..31e0899 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/expressions.py @@ -0,0 +1,117 @@ +from django.core.exceptions import FieldError +from django.db.models.constants import LOOKUP_SEP +from django.db.models.fields import FieldDoesNotExist +import copy + +class SQLEvaluator(object): + def __init__(self, expression, query, allow_joins=True, reuse=None): + self.expression = expression + self.opts = query.get_meta() + self.reuse = reuse + self.cols = [] + self.expression.prepare(self, query, allow_joins) + + def relabeled_clone(self, change_map): + clone = copy.copy(self) + clone.cols = [] + for node, col in self.cols: + if hasattr(col, 'relabeled_clone'): + clone.cols.append((node, col.relabeled_clone(change_map))) + else: + clone.cols.append((node, + (change_map.get(col[0], col[0]), col[1]))) + return clone + + def get_cols(self): + cols = [] + for node, col in self.cols: + if hasattr(node, 'get_cols'): + cols.extend(node.get_cols()) + elif isinstance(col, tuple): + cols.append(col) + return cols + + def prepare(self): + return self + + def as_sql(self, qn, connection): + return self.expression.evaluate(self, qn, connection) + + ##################################################### + # Vistor methods for initial expression preparation # + ##################################################### + + def prepare_node(self, node, query, allow_joins): + for child in node.children: + if hasattr(child, 'prepare'): + child.prepare(self, query, allow_joins) + + def prepare_leaf(self, node, query, allow_joins): + if not allow_joins and LOOKUP_SEP in node.name: + raise FieldError("Joined field references are not permitted in this query") + + field_list = node.name.split(LOOKUP_SEP) + if node.name in query.aggregates: + self.cols.append((node, query.aggregate_select[node.name])) + else: + try: + field, sources, opts, join_list, path = query.setup_joins( + field_list, query.get_meta(), + query.get_initial_alias(), self.reuse) + targets, _, join_list = query.trim_joins(sources, join_list, path) + if self.reuse is not None: + self.reuse.update(join_list) + for t in targets: + self.cols.append((node, (join_list[-1], t.column))) + except FieldDoesNotExist: + raise FieldError("Cannot resolve keyword %r into field. 
" + "Choices are: %s" % (self.name, + [f.name for f in self.opts.fields])) + + ################################################## + # Vistor methods for final expression evaluation # + ################################################## + + def evaluate_node(self, node, qn, connection): + expressions = [] + expression_params = [] + for child in node.children: + if hasattr(child, 'evaluate'): + sql, params = child.evaluate(self, qn, connection) + else: + sql, params = '%s', (child,) + + if len(getattr(child, 'children', [])) > 1: + format = '(%s)' + else: + format = '%s' + + if sql: + expressions.append(format % sql) + expression_params.extend(params) + + return connection.ops.combine_expression(node.connector, expressions), expression_params + + def evaluate_leaf(self, node, qn, connection): + col = None + for n, c in self.cols: + if n is node: + col = c + break + if col is None: + raise ValueError("Given node not found") + if hasattr(col, 'as_sql'): + return col.as_sql(qn, connection) + else: + return '%s.%s' % (qn(col[0]), qn(col[1])), [] + + def evaluate_date_modifier_node(self, node, qn, connection): + timedelta = node.children.pop() + sql, params = self.evaluate_node(node, qn, connection) + node.children.append(timedelta) + + if timedelta.days == 0 and timedelta.seconds == 0 and \ + timedelta.microseconds == 0: + return sql, params + + return connection.ops.date_interval_sql(sql, node.connector, timedelta), params diff --git a/lib/python2.7/site-packages/django/db/models/sql/query.py b/lib/python2.7/site-packages/django/db/models/sql/query.py new file mode 100644 index 0000000..7868c19 --- /dev/null +++ b/lib/python2.7/site-packages/django/db/models/sql/query.py @@ -0,0 +1,1922 @@ +""" +Create SQL statements for QuerySets. + +The code in here encapsulates all of the SQL construction so that QuerySets +themselves do not have to (and could be backed by things other than SQL +databases). The abstraction barrier only works one way: this module has to know +all about the internals of models in order to get the information it needs. +""" + +import copy + +from django.utils.datastructures import SortedDict +from django.utils.encoding import force_text +from django.utils.tree import Node +from django.utils import six +from django.db import connections, DEFAULT_DB_ALIAS +from django.db.models.constants import LOOKUP_SEP +from django.db.models.aggregates import refs_aggregate +from django.db.models.expressions import ExpressionNode +from django.db.models.fields import FieldDoesNotExist +from django.db.models.related import PathInfo +from django.db.models.sql import aggregates as base_aggregates_module +from django.db.models.sql.constants import (QUERY_TERMS, ORDER_DIR, SINGLE, + ORDER_PATTERN, JoinInfo, SelectInfo) +from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin +from django.db.models.sql.expressions import SQLEvaluator +from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode, + ExtraWhere, AND, OR, EmptyWhere) +from django.core.exceptions import FieldError + +__all__ = ['Query', 'RawQuery'] + + +class RawQuery(object): + """ + A single raw SQL query + """ + + def __init__(self, sql, using, params=None): + self.params = params or () + self.sql = sql + self.using = using + self.cursor = None + + # Mirror some properties of a normal query so that + # the compiler can be used to process results. 
+ self.low_mark, self.high_mark = 0, None # Used for offset/limit + self.extra_select = {} + self.aggregate_select = {} + + def clone(self, using): + return RawQuery(self.sql, using, params=self.params) + + def convert_values(self, value, field, connection): + """Convert the database-returned value into a type that is consistent + across database backends. + + By default, this defers to the underlying backend operations, but + it can be overridden by Query classes for specific backends. + """ + return connection.ops.convert_values(value, field) + + def get_columns(self): + if self.cursor is None: + self._execute_query() + converter = connections[self.using].introspection.table_name_converter + return [converter(column_meta[0]) + for column_meta in self.cursor.description] + + def __iter__(self): + # Always execute a new query for a new iterator. + # This could be optimized with a cache at the expense of RAM. + self._execute_query() + if not connections[self.using].features.can_use_chunked_reads: + # If the database can't use chunked reads we need to make sure we + # evaluate the entire query up front. + result = list(self.cursor) + else: + result = self.cursor + return iter(result) + + def __repr__(self): + return "<RawQuery: %r>" % (self.sql % tuple(self.params)) + + def _execute_query(self): + self.cursor = connections[self.using].cursor() + self.cursor.execute(self.sql, self.params) + + +class Query(object): + """ + A single SQL query. + """ + # SQL join types. These are part of the class because their string forms + # vary from database to database and can be customised by a subclass. + INNER = 'INNER JOIN' + LOUTER = 'LEFT OUTER JOIN' + + alias_prefix = 'T' + query_terms = QUERY_TERMS + aggregates_module = base_aggregates_module + + compiler = 'SQLCompiler' + + def __init__(self, model, where=WhereNode): + self.model = model + self.alias_refcount = {} + # alias_map is the most important data structure regarding joins. + # It's used for recording which joins exist in the query and what + # type they are. The key is the alias of the joined table (possibly + # the table name) and the value is JoinInfo from constants.py. + self.alias_map = {} + self.table_map = {} # Maps table names to list of aliases. + self.join_map = {} + self.default_cols = True + self.default_ordering = True + self.standard_ordering = True + self.used_aliases = set() + self.filter_is_sticky = False + self.included_inherited_models = {} + + # SQL-related attributes + # Select and related select clauses as SelectInfo instances. + # The select is used for cases where we want to set up the select + # clause to contain other than default fields (values(), annotate(), + # subqueries...) + self.select = [] + # The related_select_cols is used for columns needed for + # select_related - this is populated in compile stage. + self.related_select_cols = [] + self.tables = [] # Aliases in the order they are created. + self.where = where() + self.where_class = where + self.group_by = None + self.having = where() + self.order_by = [] + self.low_mark, self.high_mark = 0, None # Used for offset/limit + self.distinct = False + self.distinct_fields = [] + self.select_for_update = False + self.select_for_update_nowait = False + self.select_related = False + + # SQL aggregate-related attributes + self.aggregates = SortedDict() # Maps alias -> SQL aggregate function + self.aggregate_select_mask = None + self._aggregate_select_cache = None + + # Arbitrary maximum limit for select_related. Prevents infinite + # recursion. 
Can be changed by the depth parameter to select_related().
+ self.max_depth = 5
+
+ # These are for extensions. The contents are more or less appended
+ # verbatim to the appropriate clause.
+ self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
+ self.extra_select_mask = None
+ self._extra_select_cache = None
+
+ self.extra_tables = ()
+ self.extra_order_by = ()
+
+ # A tuple that is a set of model field names and either True, if these
+ # are the fields to defer, or False if these are the only fields to
+ # load.
+ self.deferred_loading = (set(), True)
+
+ def __str__(self):
+ """
+ Returns the query as a string of SQL with the parameter values
+ substituted in (use sql_with_params() to see the unsubstituted string).
+
+ Parameter values won't necessarily be quoted correctly, since that is
+ done by the database interface at execution time.
+ """
+ sql, params = self.sql_with_params()
+ return sql % params
+
+ def sql_with_params(self):
+ """
+ Returns the query as an SQL string and the parameters that will be
+ substituted into the query.
+ """
+ return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
+
+ def __deepcopy__(self, memo):
+ result = self.clone(memo=memo)
+ memo[id(self)] = result
+ return result
+
+ def prepare(self):
+ return self
+
+ def get_compiler(self, using=None, connection=None):
+ if using is None and connection is None:
+ raise ValueError("Need either using or connection")
+ if using:
+ connection = connections[using]
+
+ # Check that the compiler will be able to execute the query
+ for alias, aggregate in self.aggregate_select.items():
+ connection.ops.check_aggregate_support(aggregate)
+
+ return connection.ops.compiler(self.compiler)(self, connection, using)
+
+ def get_meta(self):
+ """
+ Returns the Options instance (the model._meta) from which to start
+ processing. Normally, this is self.model._meta, but it can be changed
+ by subclasses.
+ """
+ return self.model._meta
+
+ def clone(self, klass=None, memo=None, **kwargs):
+ """
+ Creates a copy of the current instance. The 'kwargs' parameter can be
+ used by clients to update attributes after copying has taken place.
+ """ + obj = Empty() + obj.__class__ = klass or self.__class__ + obj.model = self.model + obj.alias_refcount = self.alias_refcount.copy() + obj.alias_map = self.alias_map.copy() + obj.table_map = self.table_map.copy() + obj.join_map = self.join_map.copy() + obj.default_cols = self.default_cols + obj.default_ordering = self.default_ordering + obj.standard_ordering = self.standard_ordering + obj.included_inherited_models = self.included_inherited_models.copy() + obj.select = self.select[:] + obj.related_select_cols = [] + obj.tables = self.tables[:] + obj.where = self.where.clone() + obj.where_class = self.where_class + if self.group_by is None: + obj.group_by = None + else: + obj.group_by = self.group_by[:] + obj.having = self.having.clone() + obj.order_by = self.order_by[:] + obj.low_mark, obj.high_mark = self.low_mark, self.high_mark + obj.distinct = self.distinct + obj.distinct_fields = self.distinct_fields[:] + obj.select_for_update = self.select_for_update + obj.select_for_update_nowait = self.select_for_update_nowait + obj.select_related = self.select_related + obj.related_select_cols = [] + obj.aggregates = self.aggregates.copy() + if self.aggregate_select_mask is None: + obj.aggregate_select_mask = None + else: + obj.aggregate_select_mask = self.aggregate_select_mask.copy() + # _aggregate_select_cache cannot be copied, as doing so breaks the + # (necessary) state in which both aggregates and + # _aggregate_select_cache point to the same underlying objects. + # It will get re-populated in the cloned queryset the next time it's + # used. + obj._aggregate_select_cache = None + obj.max_depth = self.max_depth + obj.extra = self.extra.copy() + if self.extra_select_mask is None: + obj.extra_select_mask = None + else: + obj.extra_select_mask = self.extra_select_mask.copy() + if self._extra_select_cache is None: + obj._extra_select_cache = None + else: + obj._extra_select_cache = self._extra_select_cache.copy() + obj.extra_tables = self.extra_tables + obj.extra_order_by = self.extra_order_by + obj.deferred_loading = copy.copy(self.deferred_loading[0]), self.deferred_loading[1] + if self.filter_is_sticky and self.used_aliases: + obj.used_aliases = self.used_aliases.copy() + else: + obj.used_aliases = set() + obj.filter_is_sticky = False + + obj.__dict__.update(kwargs) + if hasattr(obj, '_setup_query'): + obj._setup_query() + return obj + + def convert_values(self, value, field, connection): + """Convert the database-returned value into a type that is consistent + across database backends. + + By default, this defers to the underlying backend operations, but + it can be overridden by Query classes for specific backends. + """ + return connection.ops.convert_values(value, field) + + def resolve_aggregate(self, value, aggregate, connection): + """Resolve the value of aggregates returned by the database to + consistent (and reasonable) types. + + This is required because of the predisposition of certain backends + to return Decimal and long types when they are not needed. + """ + if value is None: + if aggregate.is_ordinal: + return 0 + # Return None as-is + return value + elif aggregate.is_ordinal: + # Any ordinal aggregate (e.g., count) returns an int + return int(value) + elif aggregate.is_computed: + # Any computed aggregate (e.g., avg) returns a float + return float(value) + else: + # Return value depends on the type of the field being processed. 
+ return self.convert_values(value, aggregate.field, connection)
+
+ def get_aggregation(self, using):
+ """
+ Returns the dictionary with the values of the existing aggregations.
+ """
+ if not self.aggregate_select:
+ return {}
+
+ # If there is a group by clause, aggregating does not add useful
+ # information but retrieves only the first row. Aggregate
+ # over the subquery instead.
+ if self.group_by is not None:
+ from django.db.models.sql.subqueries import AggregateQuery
+ query = AggregateQuery(self.model)
+
+ obj = self.clone()
+
+ # Remove any aggregates marked for reduction from the subquery
+ # and move them to the outer AggregateQuery.
+ for alias, aggregate in self.aggregate_select.items():
+ if aggregate.is_summary:
+ query.aggregate_select[alias] = aggregate
+ del obj.aggregate_select[alias]
+
+ try:
+ query.add_subquery(obj, using)
+ except EmptyResultSet:
+ return dict(
+ (alias, None)
+ for alias in query.aggregate_select
+ )
+ else:
+ query = self
+ self.select = []
+ self.default_cols = False
+ self.extra = {}
+ self.remove_inherited_models()
+
+ query.clear_ordering(True)
+ query.clear_limits()
+ query.select_for_update = False
+ query.select_related = False
+ query.related_select_cols = []
+
+ result = query.get_compiler(using).execute_sql(SINGLE)
+ if result is None:
+ result = [None for q in query.aggregate_select.items()]
+
+ return dict([
+ (alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
+ for (alias, aggregate), val
+ in zip(query.aggregate_select.items(), result)
+ ])
+
+ def get_count(self, using):
+ """
+ Performs a COUNT() query using the current filter constraints.
+ """
+ obj = self.clone()
+ if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
+ # If a select clause exists, then the query has already started to
+ # specify the columns that are to be returned.
+ # In this case, we need to use a subquery to evaluate the count.
+ from django.db.models.sql.subqueries import AggregateQuery
+ subquery = obj
+ subquery.clear_ordering(True)
+ subquery.clear_limits()
+
+ obj = AggregateQuery(obj.model)
+ try:
+ obj.add_subquery(subquery, using=using)
+ except EmptyResultSet:
+ # add_subquery evaluates the query; if it raises
+ # EmptyResultSet then there can be no results, and
+ # therefore the count is obviously 0.
+ return 0
+
+ obj.add_count_column()
+ number = obj.get_aggregation(using=using)[None]
+
+ # Apply offset and limit constraints manually, since using LIMIT/OFFSET
+ # in SQL (in variants that provide them) doesn't change the COUNT
+ # output.
+ number = max(0, number - self.low_mark)
+ if self.high_mark is not None:
+ number = min(number, self.high_mark - self.low_mark)
+
+ return number
+
+ def has_results(self, using):
+ q = self.clone()
+ q.clear_select_clause()
+ q.add_extra({'a': 1}, None, None, None, None, None)
+ q.set_extra_mask(['a'])
+ q.clear_ordering(True)
+ q.set_limits(high=1)
+ compiler = q.get_compiler(using=using)
+ return bool(compiler.execute_sql(SINGLE))
+
+ def combine(self, rhs, connector):
+ """
+ Merge the 'rhs' query into the current one (with any 'rhs' effects
+ being applied *after* (that is, "to the right of") anything in the
+ current query). 'rhs' is not modified during a call to this function.
+
+ The 'connector' parameter describes how to connect filters from the
+ 'rhs' query.
+ """
+ assert self.model == rhs.model, \
+ "Cannot combine queries on two different base models."
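+ # Combining is what powers the | and & operators on QuerySets; e.g.
+ # (with a hypothetical Author model):
+ #
+ # Author.objects.filter(name='a') | Author.objects.filter(num=5)
+ #
+ # clones the left-hand query and calls combine(rhs.query, OR) on it.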
+ assert self.can_filter(), \ + "Cannot combine queries once a slice has been taken." + assert self.distinct == rhs.distinct, \ + "Cannot combine a unique query with a non-unique query." + assert self.distinct_fields == rhs.distinct_fields, \ + "Cannot combine queries with different distinct fields." + + self.remove_inherited_models() + # Work out how to relabel the rhs aliases, if necessary. + change_map = {} + conjunction = (connector == AND) + + # Determine which existing joins can be reused. When combining the + # query with AND we must recreate all joins for m2m filters. When + # combining with OR we can reuse joins. The reason is that in AND + # case a single row can't fulfill a condition like: + # revrel__col=1 & revrel__col=2 + # But, there might be two different related rows matching this + # condition. In OR case a single True is enough, so single row is + # enough, too. + # + # Note that we will be creating duplicate joins for non-m2m joins in + # the AND case. The results will be correct but this creates too many + # joins. This is something that could be fixed later on. + reuse = set() if conjunction else set(self.tables) + # Base table must be present in the query - this is the same + # table on both sides. + self.get_initial_alias() + # Now, add the joins from rhs query into the new query (skipping base + # table). + for alias in rhs.tables[1:]: + table, _, join_type, lhs, join_cols, nullable, join_field = rhs.alias_map[alias] + promote = (join_type == self.LOUTER) + # If the left side of the join was already relabeled, use the + # updated alias. + lhs = change_map.get(lhs, lhs) + new_alias = self.join( + (lhs, table, join_cols), reuse=reuse, + outer_if_first=not conjunction, nullable=nullable, + join_field=join_field) + if promote: + self.promote_joins([new_alias]) + # We can't reuse the same join again in the query. If we have two + # distinct joins for the same connection in rhs query, then the + # combined query must have two joins, too. + reuse.discard(new_alias) + change_map[alias] = new_alias + if not rhs.alias_refcount[alias]: + # The alias was unused in the rhs query. Unref it so that it + # will be unused in the new query, too. We have to add and + # unref the alias so that join promotion has information of + # the join type for the unused alias. + self.unref_alias(new_alias) + + # So that we don't exclude valid results in an OR query combination, + # all joins exclusive to either the lhs or the rhs must be converted + # to an outer join. RHS joins were already set to outer joins above, + # so check which joins were used only in the lhs query. + if not conjunction: + rhs_used_joins = set(change_map.values()) + to_promote = [alias for alias in self.tables + if alias not in rhs_used_joins] + self.promote_joins(to_promote, True) + + # Now relabel a copy of the rhs where-clause and add it to the current + # one. + if rhs.where: + w = rhs.where.clone() + w.relabel_aliases(change_map) + if not self.where: + # Since 'self' matches everything, add an explicit "include + # everything" where-constraint so that connections between the + # where clauses won't exclude valid results. + self.where.add(EverythingNode(), AND) + elif self.where: + # rhs has an empty where clause. + w = self.where_class() + w.add(EverythingNode(), AND) + else: + w = self.where_class() + self.where.add(w, connector) + + # Selection columns and extra extensions are those provided by 'rhs'. 
+ self.select = []
+ for col, field in rhs.select:
+ if isinstance(col, (list, tuple)):
+ new_col = change_map.get(col[0], col[0]), col[1]
+ self.select.append(SelectInfo(new_col, field))
+ else:
+ new_col = col.relabeled_clone(change_map)
+ self.select.append(SelectInfo(new_col, field))
+
+ if connector == OR:
+ # It would be nice to be able to handle this, but the queries don't
+ # really make sense (or return consistent value sets). Not worth
+ # the extra complexity when you can write a real query instead.
+ if self.extra and rhs.extra:
+ raise ValueError("When merging querysets using 'or', you "
+ "cannot have extra(select=...) on both sides.")
+ self.extra.update(rhs.extra)
+ extra_select_mask = set()
+ if self.extra_select_mask is not None:
+ extra_select_mask.update(self.extra_select_mask)
+ if rhs.extra_select_mask is not None:
+ extra_select_mask.update(rhs.extra_select_mask)
+ if extra_select_mask:
+ self.set_extra_mask(extra_select_mask)
+ self.extra_tables += rhs.extra_tables
+
+ # Ordering uses the 'rhs' ordering, unless it has none, in which case
+ # the current ordering is used.
+ self.order_by = rhs.order_by[:] if rhs.order_by else self.order_by
+ self.extra_order_by = rhs.extra_order_by or self.extra_order_by
+
+ def deferred_to_data(self, target, callback):
+ """
+ Converts the self.deferred_loading data structure to an alternate data
+ structure, describing the fields that *will* be loaded. This is used to
+ compute the columns to select from the database and also by the
+ QuerySet class to work out which fields are being initialised on each
+ model. Models that have all their fields included aren't mentioned in
+ the result, only those that have field restrictions in place.
+
+ The "target" parameter is the instance that is populated (in place).
+ The "callback" is a function that is called whenever a (model, field)
+ pair needs to be added to "target". It accepts three parameters:
+ "target", and the model and list of fields being added for that model.
+ """
+ field_names, defer = self.deferred_loading
+ if not field_names:
+ return
+ orig_opts = self.get_meta()
+ seen = {}
+ must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
+ for field_name in field_names:
+ parts = field_name.split(LOOKUP_SEP)
+ cur_model = self.model
+ opts = orig_opts
+ for name in parts[:-1]:
+ old_model = cur_model
+ source = opts.get_field_by_name(name)[0]
+ if is_reverse_o2o(source):
+ cur_model = source.model
+ else:
+ cur_model = source.rel.to
+ opts = cur_model._meta
+ # Even if we're "just passing through" this model, we must add
+ # both the current model's pk and the related reference field
+ # (if it's not a reverse relation) to the things we select.
+ if not is_reverse_o2o(source):
+ must_include[old_model].add(source)
+ add_to_dict(must_include, cur_model, opts.pk)
+ field, model, _, _ = opts.get_field_by_name(parts[-1])
+ if model is None:
+ model = cur_model
+ if not is_reverse_o2o(field):
+ add_to_dict(seen, model, field)
+
+ if defer:
+ # We need to load all fields for each model, except those that
+ # appear in "seen" (for all models that appear in "seen"). The only
+ # slight complexity here is handling fields that exist on parent
+ # models.
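+ # As an illustration (hypothetical Entry model):
+ # Entry.objects.defer('body') stores ({'body'}, True) -- load
+ # everything except 'body' -- while Entry.objects.only('headline')
+ # stores ({'headline'}, False) -- load only 'headline' (plus the pk).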
+ workset = {}
+ for model, values in six.iteritems(seen):
+ for field, m in model._meta.get_fields_with_model():
+ if field in values:
+ continue
+ add_to_dict(workset, m or model, field)
+ for model, values in six.iteritems(must_include):
+ # If we haven't included a model in workset, we don't add the
+ # corresponding must_include fields for that model, since an
+ # empty set means "include all fields". That's why there's no
+ # "else" branch here.
+ if model in workset:
+ workset[model].update(values)
+ for model, values in six.iteritems(workset):
+ callback(target, model, values)
+ else:
+ for model, values in six.iteritems(must_include):
+ if model in seen:
+ seen[model].update(values)
+ else:
+ # As we've passed through this model, but not explicitly
+ # included any fields, we have to make sure it's mentioned
+ # so that only the "must include" fields are pulled in.
+ seen[model] = values
+ # Now ensure that every model in the inheritance chain is mentioned
+ # in the parent list. Again, it must be mentioned to ensure that
+ # only "must include" fields are pulled in.
+ for model in orig_opts.get_parent_list():
+ if model not in seen:
+ seen[model] = set()
+ for model, values in six.iteritems(seen):
+ callback(target, model, values)
+
+
+ def deferred_to_columns_cb(self, target, model, fields):
+ """
+ Callback used by deferred_to_columns(). The "target" parameter should
+ be a dict mapping table names to sets of column names.
+ """
+ table = model._meta.db_table
+ if table not in target:
+ target[table] = set()
+ for field in fields:
+ target[table].add(field.column)
+
+
+ def table_alias(self, table_name, create=False):
+ """
+ Returns a table alias for the given table_name and whether this is a
+ new alias or not.
+
+ If 'create' is true, a new alias is always created. Otherwise, the
+ most recently created alias for the table (if one exists) is reused.
+ """
+ current = self.table_map.get(table_name)
+ if not create and current:
+ alias = current[0]
+ self.alias_refcount[alias] += 1
+ return alias, False
+
+ # Create a new alias for this table.
+ if current:
+ alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
+ current.append(alias)
+ else:
+ # The first occurrence of a table uses the table name directly.
+ alias = table_name
+ self.table_map[alias] = [alias]
+ self.alias_refcount[alias] = 1
+ self.tables.append(alias)
+ return alias, True
+
+ def ref_alias(self, alias):
+ """ Increases the reference count for this alias. """
+ self.alias_refcount[alias] += 1
+
+ def unref_alias(self, alias, amount=1):
+ """ Decreases the reference count for this alias. """
+ self.alias_refcount[alias] -= amount
+
+ def promote_joins(self, aliases, unconditional=False):
+ """
+ Recursively promotes the join type of the given aliases and their
+ children to an outer join. If 'unconditional' is False, the join is
+ only promoted if it is nullable or the parent join is an outer join.
+
+ Note about join promotion: When promoting any alias, we make sure all
+ joins which start from that alias are promoted, too. When adding a join
+ in join(), we make sure any join added to an already existing LOUTER
+ join is generated as LOUTER. This ensures we don't ever have broken
+ join chains which contain first a LOUTER join, then an INNER join;
+ that is, this kind of chain should never be generated:
+ a LOUTER b INNER c. The reason for avoiding this type of join chain is
+ that the INNER after the LOUTER will effectively remove any effect the
+ LOUTER had.
+ """ + aliases = list(aliases) + while aliases: + alias = aliases.pop(0) + if self.alias_map[alias].join_cols[0][1] is None: + # This is the base table (first FROM entry) - this table + # isn't really joined at all in the query, so we should not + # alter its join type. + continue + parent_alias = self.alias_map[alias].lhs_alias + parent_louter = (parent_alias + and self.alias_map[parent_alias].join_type == self.LOUTER) + already_louter = self.alias_map[alias].join_type == self.LOUTER + if ((unconditional or self.alias_map[alias].nullable + or parent_louter) and not already_louter): + data = self.alias_map[alias]._replace(join_type=self.LOUTER) + self.alias_map[alias] = data + # Join type of 'alias' changed, so re-examine all aliases that + # refer to this one. + aliases.extend( + join for join in self.alias_map.keys() + if (self.alias_map[join].lhs_alias == alias + and join not in aliases)) + + def reset_refcounts(self, to_counts): + """ + This method will reset reference counts for aliases so that they match + the value passed in :param to_counts:. + """ + for alias, cur_refcount in self.alias_refcount.copy().items(): + unref_amount = cur_refcount - to_counts.get(alias, 0) + self.unref_alias(alias, unref_amount) + + def promote_disjunction(self, aliases_before, alias_usage_counts, + num_childs): + """ + This method is to be used for promoting joins in ORed filters. + + The principle for promotion is: any alias which is used (it is in + alias_usage_counts), is not used by every child of the ORed filter, + and isn't pre-existing needs to be promoted to LOUTER join. + """ + for alias, use_count in alias_usage_counts.items(): + if use_count < num_childs and alias not in aliases_before: + self.promote_joins([alias]) + + def change_aliases(self, change_map): + """ + Changes the aliases in change_map (which maps old-alias -> new-alias), + relabelling any references to them in select columns and the where + clause. + """ + assert set(change_map.keys()).intersection(set(change_map.values())) == set() + + def relabel_column(col): + if isinstance(col, (list, tuple)): + old_alias = col[0] + return (change_map.get(old_alias, old_alias), col[1]) + else: + return col.relabeled_clone(change_map) + # 1. Update references in "select" (normal columns plus aliases), + # "group by", "where" and "having". + self.where.relabel_aliases(change_map) + self.having.relabel_aliases(change_map) + if self.group_by: + self.group_by = [relabel_column(col) for col in self.group_by] + self.select = [SelectInfo(relabel_column(s.col), s.field) + for s in self.select] + self.aggregates = SortedDict( + (key, relabel_column(col)) for key, col in self.aggregates.items()) + + # 2. Rename the alias in the internal table/alias datastructures. 
+ for ident, aliases in self.join_map.items(): + del self.join_map[ident] + aliases = tuple([change_map.get(a, a) for a in aliases]) + ident = (change_map.get(ident[0], ident[0]),) + ident[1:] + self.join_map[ident] = aliases + for old_alias, new_alias in six.iteritems(change_map): + alias_data = self.alias_map[old_alias] + alias_data = alias_data._replace(rhs_alias=new_alias) + self.alias_refcount[new_alias] = self.alias_refcount[old_alias] + del self.alias_refcount[old_alias] + self.alias_map[new_alias] = alias_data + del self.alias_map[old_alias] + + table_aliases = self.table_map[alias_data.table_name] + for pos, alias in enumerate(table_aliases): + if alias == old_alias: + table_aliases[pos] = new_alias + break + for pos, alias in enumerate(self.tables): + if alias == old_alias: + self.tables[pos] = new_alias + break + for key, alias in self.included_inherited_models.items(): + if alias in change_map: + self.included_inherited_models[key] = change_map[alias] + + # 3. Update any joins that refer to the old alias. + for alias, data in six.iteritems(self.alias_map): + lhs = data.lhs_alias + if lhs in change_map: + data = data._replace(lhs_alias=change_map[lhs]) + self.alias_map[alias] = data + + def bump_prefix(self, exceptions=()): + """ + Changes the alias prefix to the next letter in the alphabet and + relabels all the aliases. Even tables that previously had no alias will + get an alias after this call (it's mostly used for nested queries and + the outer query will already be using the non-aliased table name). + + Subclasses who create their own prefix should override this method to + produce a similar result (a new prefix and relabelled aliases). + + The 'exceptions' parameter is a container that holds alias names which + should not be changed. + """ + current = ord(self.alias_prefix) + assert current < ord('Z') + prefix = chr(current + 1) + self.alias_prefix = prefix + change_map = SortedDict() + for pos, alias in enumerate(self.tables): + if alias in exceptions: + continue + new_alias = '%s%d' % (prefix, pos) + change_map[alias] = new_alias + self.tables[pos] = new_alias + self.change_aliases(change_map) + + def get_initial_alias(self): + """ + Returns the first alias for this query, after increasing its reference + count. + """ + if self.tables: + alias = self.tables[0] + self.ref_alias(alias) + else: + alias = self.join((None, self.get_meta().db_table, None)) + return alias + + def count_active_tables(self): + """ + Returns the number of tables in this query with a non-zero reference + count. Note that after execution, the reference counts are zeroed, so + tables added in compiler will not be seen by this method. + """ + return len([1 for count in self.alias_refcount.values() if count]) + + def join(self, connection, reuse=None, outer_if_first=False, + nullable=False, join_field=None): + """ + Returns an alias for the join in 'connection', either reusing an + existing alias for that join or creating a new one. 'connection' is a + tuple (lhs, table, join_cols) where 'lhs' is either an existing + table alias or a table name. 'join_cols' is a tuple of tuples containing + columns to join on ((l_id1, r_id1), (l_id2, r_id2)). The join corresponds + to the SQL equivalent of:: + + lhs.l_id1 = table.r_id1 AND lhs.l_id2 = table.r_id2 + + The 'reuse' parameter can be either None which means all joins + (matching the connection) are reusable, or it can be a set containing + the aliases that can be reused. 
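+
+ For instance, joining a hypothetical myapp_book table to myapp_author
+ along a foreign key would use a connection tuple like
+ ('myapp_book', 'myapp_author', (('author_id', 'id'),)).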
+
+ If 'outer_if_first' is True and a new join is created, it will have the
+ LOUTER join type.
+
+ A join is always created as LOUTER if the lhs alias is LOUTER to make
+ sure we do not generate chains like t1 LOUTER t2 INNER t3.
+
+ If 'nullable' is True, the join can potentially involve NULL values and
+ is a candidate for promotion (to "left outer") when combining querysets.
+
+ The 'join_field' is the field we are joining along (if any).
+ """
+ lhs, table, join_cols = connection
+ assert lhs is None or join_field is not None
+ existing = self.join_map.get(connection, ())
+ if reuse is None:
+ reuse = existing
+ else:
+ reuse = [a for a in existing if a in reuse]
+ for alias in reuse:
+ if join_field and self.alias_map[alias].join_field != join_field:
+ # The join_map doesn't contain join_field (mainly because
+ # fields in Query structs are problematic in pickling), so
+ # check that the existing join was created using the same
+ # join_field as the join under construction.
+ continue
+ self.ref_alias(alias)
+ return alias
+
+ # No reuse is possible, so we need a new alias.
+ alias, _ = self.table_alias(table, True)
+ if not lhs:
+ # Not all tables need to be joined to anything. No join type
+ # means the later columns are ignored.
+ join_type = None
+ elif outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
+ # We need to use LOUTER join if asked by outer_if_first or if the
+ # LHS table is left-joined in the query.
+ join_type = self.LOUTER
+ else:
+ join_type = self.INNER
+ join = JoinInfo(table, alias, join_type, lhs, join_cols or ((None, None),), nullable,
+ join_field)
+ self.alias_map[alias] = join
+ if connection in self.join_map:
+ self.join_map[connection] += (alias,)
+ else:
+ self.join_map[connection] = (alias,)
+ return alias
+
+ def setup_inherited_models(self):
+ """
+ If the model that is the basis for this QuerySet inherits other models,
+ we need to ensure that those other models have their tables included in
+ the query.
+
+ We do this as a separate step so that subclasses know which
+ tables are going to be active in the query, without needing to compute
+ all the select columns (this method is called from pre_sql_setup(),
+ whereas column determination is a later part, and side-effect, of
+ as_sql()).
+ """
+ opts = self.get_meta()
+ root_alias = self.tables[0]
+ seen = {None: root_alias}
+
+ for field, model in opts.get_fields_with_model():
+ if model not in seen:
+ self.join_parent_model(opts, model, root_alias, seen)
+ self.included_inherited_models = seen
+
+ def join_parent_model(self, opts, model, alias, seen):
+ """
+ Makes sure the given 'model' is joined in the query. If 'model' isn't
+ a parent of 'opts' or if it is None this method is a no-op.
+
+ The 'alias' is the root alias for starting the join, 'seen' is a dict
+ of model -> alias of existing joins. It must also contain a mapping
+ of None -> some alias. This will be returned in the no-op case.
+ """ + if model in seen: + return seen[model] + chain = opts.get_base_chain(model) + if chain is None: + return alias + curr_opts = opts + for int_model in chain: + if int_model in seen: + return seen[int_model] + # Proxy model have elements in base chain + # with no parents, assign the new options + # object and skip to the next base in that + # case + if not curr_opts.parents[int_model]: + curr_opts = int_model._meta + continue + link_field = curr_opts.get_ancestor_link(int_model) + _, _, _, joins, _ = self.setup_joins( + [link_field.name], curr_opts, alias) + curr_opts = int_model._meta + alias = seen[int_model] = joins[-1] + return alias or seen[None] + + def remove_inherited_models(self): + """ + Undoes the effects of setup_inherited_models(). Should be called + whenever select columns (self.select) are set explicitly. + """ + for key, alias in self.included_inherited_models.items(): + if key: + self.unref_alias(alias) + self.included_inherited_models = {} + + + def add_aggregate(self, aggregate, model, alias, is_summary): + """ + Adds a single aggregate expression to the Query + """ + opts = model._meta + field_list = aggregate.lookup.split(LOOKUP_SEP) + if len(field_list) == 1 and aggregate.lookup in self.aggregates: + # Aggregate is over an annotation + field_name = field_list[0] + col = field_name + source = self.aggregates[field_name] + if not is_summary: + raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % ( + aggregate.name, field_name, field_name)) + elif ((len(field_list) > 1) or + (field_list[0] not in [i.name for i in opts.fields]) or + self.group_by is None or + not is_summary): + # If: + # - the field descriptor has more than one part (foo__bar), or + # - the field descriptor is referencing an m2m/m2o field, or + # - this is a reference to a model field (possibly inherited), or + # - this is an annotation over a model field + # then we need to explore the joins that are required. + + field, sources, opts, join_list, path = self.setup_joins( + field_list, opts, self.get_initial_alias()) + + # Process the join chain to see if it can be trimmed + targets, _, join_list = self.trim_joins(sources, join_list, path) + + # If the aggregate references a model or field that requires a join, + # those joins must be LEFT OUTER - empty join rows must be returned + # in order for zeros to be returned for those aggregates. + self.promote_joins(join_list, True) + + col = targets[0].column + source = sources[0] + col = (join_list[-1], col) + else: + # The simplest cases. No joins required - + # just reference the provided column alias. + field_name = field_list[0] + source = opts.get_field(field_name) + col = field_name + + # Add the aggregate to the query + aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary) + + def build_filter(self, filter_expr, branch_negated=False, current_negated=False, + can_reuse=None): + """ + Builds a WhereNode for a single filter clause, but doesn't add it + to this Query. Query.add_q() will then add this filter to the where + or having Node. + + The 'branch_negated' tells us if the current branch contains any + negations. This will be used to determine if subqueries are needed. + + The 'current_negated' is used to determine if the current filter is + negated or not and this will be used to determine if IS NULL filtering + is needed. + + The difference between current_netageted and branch_negated is that + branch_negated is set on first negation, but current_negated is + flipped for each negation. 
+
+ Note that add_filter will not do any negating itself, that is done
+ higher up in the code by add_q().
+
+ The 'can_reuse' is a set of reusable joins for multijoins.
+
+ The method will create a filter clause that can be added to the current
+ query. However, if the filter isn't added to the query then the caller
+ is responsible for unreffing the joins used.
+ """
+ arg, value = filter_expr
+ parts = arg.split(LOOKUP_SEP)
+ if not parts:
+ raise FieldError("Cannot parse keyword query %r" % arg)
+
+ # Work out the lookup type and remove it from the end of 'parts',
+ # if necessary.
+ lookup_type = 'exact' # Default lookup type
+ num_parts = len(parts)
+ if (len(parts) > 1 and parts[-1] in self.query_terms
+ and arg not in self.aggregates):
+ # Traverse the lookup query to distinguish related fields from
+ # lookup types.
+ lookup_model = self.model
+ for counter, field_name in enumerate(parts):
+ try:
+ lookup_field = lookup_model._meta.get_field(field_name)
+ except FieldDoesNotExist:
+ # Not a field. Bail out.
+ lookup_type = parts.pop()
+ break
+ # Unless we're at the end of the list of lookups, let's attempt
+ # to continue traversing relations.
+ if (counter + 1) < num_parts:
+ try:
+ lookup_model = lookup_field.rel.to
+ except AttributeError:
+ # Not a related field. Bail out.
+ lookup_type = parts.pop()
+ break
+
+ clause = self.where_class()
+ # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
+ # uses of None as a query value.
+ if value is None:
+ if lookup_type != 'exact':
+ raise ValueError("Cannot use None as a query value")
+ lookup_type = 'isnull'
+ value = True
+ elif callable(value):
+ value = value()
+ elif isinstance(value, ExpressionNode):
+ # If value is a query expression, evaluate it
+ value = SQLEvaluator(value, self, reuse=can_reuse)
+ # For Oracle '' is equivalent to null. The check needs to be done
+ # at this stage because join promotion can't be done at the compiler
+ # stage. Using DEFAULT_DB_ALIAS isn't nice, but it is the best we
+ # can do here. A similar thing is done in is_nullable(), too.
+ if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
+ lookup_type == 'exact' and value == ''):
+ value = True
+ lookup_type = 'isnull'
+
+ for alias, aggregate in self.aggregates.items():
+ if alias in (parts[0], LOOKUP_SEP.join(parts)):
+ clause.add((aggregate, lookup_type, value), AND)
+ return clause
+
+ opts = self.get_meta()
+ alias = self.get_initial_alias()
+ allow_many = not branch_negated
+
+ try:
+ field, sources, opts, join_list, path = self.setup_joins(
+ parts, opts, alias, can_reuse, allow_many,
+ allow_explicit_fk=True)
+ if can_reuse is not None:
+ can_reuse.update(join_list)
+ except MultiJoin as e:
+ return self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
+ can_reuse, e.names_with_path)
+
+ if (lookup_type == 'isnull' and value is True and not current_negated and
+ len(join_list) > 1):
+ # If the comparison is against NULL, we may need to use some left
+ # outer joins when creating the join chain. This is only done when
+ # needed, as it's less efficient at the database level.
+ self.promote_joins(join_list)
+
+ # Process the join list to see if we can remove any inner joins from
+ # the far end (fewer tables in a query is better). Note that join
+ # promotion must happen before join trimming to have the join type
+ # information available when reusing joins.
+ targets, alias, join_list = self.trim_joins(sources, join_list, path)
+
+ if hasattr(field, 'get_lookup_constraint'):
+ constraint = field.get_lookup_constraint(self.where_class, alias, targets, sources,
+ lookup_type, value)
+ else:
+ constraint = (Constraint(alias, targets[0].column, field), lookup_type, value)
+ clause.add(constraint, AND)
+ if current_negated and (lookup_type != 'isnull' or value is False):
+ self.promote_joins(join_list)
+ if (lookup_type != 'isnull' and (
+ self.is_nullable(targets[0]) or
+ self.alias_map[join_list[-1]].join_type == self.LOUTER)):
+ # The condition added here will be SQL like this:
+ # NOT (col IS NOT NULL), where the first NOT is added in
+ # upper layers of code. The reason for the addition is that if col
+ # is null, then col != someval will result in SQL "unknown"
+ # which isn't the same as in Python. The Python None handling
+ # is wanted, and it can be gotten by
+ # (col IS NULL OR col != someval)
+ # <=>
+ # NOT (col IS NOT NULL AND col = someval).
+ clause.add((Constraint(alias, targets[0].column, None), 'isnull', False), AND)
+ return clause
+
+ def add_filter(self, filter_clause):
+ self.where.add(self.build_filter(filter_clause), 'AND')
+
+ def need_having(self, obj):
+ """
+ Returns whether or not all elements of this q_object need to be put
+ together in the HAVING clause.
+ """
+ if not isinstance(obj, Node):
+ return (refs_aggregate(obj[0].split(LOOKUP_SEP), self.aggregates)
+ or (hasattr(obj[1], 'contains_aggregate')
+ and obj[1].contains_aggregate(self.aggregates)))
+ return any(self.need_having(c) for c in obj.children)
+
+ def split_having_parts(self, q_object, negated=False):
+ """
+ Returns a list of q_objects which need to go into the having clause
+ instead of the where clause. Removes the split-out nodes from the
+ given q_object. Note that the q_object is altered, so cloning it is
+ needed.
+ """
+ having_parts = []
+ for c in q_object.children[:]:
+ # When constructing the having nodes we need to take care to
+ # preserve the negation status from the upper parts of the tree
+ if isinstance(c, Node):
+ # For each negated child, flip the in_negated flag.
+ in_negated = c.negated ^ negated
+ if c.connector == OR and self.need_having(c):
+ # A subtree starting from an OR clause must go into the
+ # having clause as a whole if any part of that tree
+ # references an aggregate.
+ q_object.children.remove(c)
+ having_parts.append(c)
+ c.negated = in_negated
+ else:
+ having_parts.extend(
+ self.split_having_parts(c, in_negated)[1])
+ elif self.need_having(c):
+ q_object.children.remove(c)
+ new_q = self.where_class(children=[c], negated=negated)
+ having_parts.append(new_q)
+ return q_object, having_parts
+
+ def add_q(self, q_object):
+ """
+ A preprocessor for the internal _add_q(). Responsible for
+ splitting the given q_object into where and having parts and
+ setting up some internal variables.
+ """
+ if not self.need_having(q_object):
+ where_part, having_parts = q_object, []
+ else:
+ where_part, having_parts = self.split_having_parts(
+ q_object.clone(), q_object.negated)
+ used_aliases = self.used_aliases
+ clause = self._add_q(where_part, used_aliases)
+ self.where.add(clause, AND)
+ for hp in having_parts:
+ clause = self._add_q(hp, used_aliases)
+ self.having.add(clause, AND)
+ if self.filter_is_sticky:
+ self.used_aliases = used_aliases
+
+ def _add_q(self, q_object, used_aliases, branch_negated=False,
+ current_negated=False):
+ """
+ Adds a Q-object to the current filter.
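+
+ For example, Q(a=1) | ~Q(b=2) arrives as a two-child OR node; the
+ negated child flips current_negated and sets branch_negated for
+ everything beneath it.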
+ """ + connector = q_object.connector + current_negated = current_negated ^ q_object.negated + branch_negated = branch_negated or q_object.negated + target_clause = self.where_class(connector=connector, + negated=q_object.negated) + # Treat case NOT (a AND b) like case ((NOT a) OR (NOT b)) for join + # promotion. See ticket #21748. + effective_connector = connector + if current_negated: + effective_connector = OR if effective_connector == AND else AND + if effective_connector == OR: + alias_usage_counts = dict() + aliases_before = set(self.tables) + for child in q_object.children: + if effective_connector == OR: + refcounts_before = self.alias_refcount.copy() + if isinstance(child, Node): + child_clause = self._add_q( + child, used_aliases, branch_negated, + current_negated) + else: + child_clause = self.build_filter( + child, can_reuse=used_aliases, branch_negated=branch_negated, + current_negated=current_negated) + target_clause.add(child_clause, connector) + if effective_connector == OR: + used = alias_diff(refcounts_before, self.alias_refcount) + for alias in used: + alias_usage_counts[alias] = alias_usage_counts.get(alias, 0) + 1 + if effective_connector == OR: + self.promote_disjunction(aliases_before, alias_usage_counts, + len(q_object.children)) + return target_clause + + def names_to_path(self, names, opts, allow_many, allow_explicit_fk): + """ + Walks the names path and turns them PathInfo tuples. Note that a + single name in 'names' can generate multiple PathInfos (m2m for + example). + + 'names' is the path of names to travle, 'opts' is the model Options we + start the name resolving from, 'allow_many' and 'allow_explicit_fk' + are as for setup_joins(). + + Returns a list of PathInfo tuples. In addition returns the final field + (the last used join field), and target (which is a field guaranteed to + contain the same value as the final field). + """ + path, names_with_path = [], [] + for pos, name in enumerate(names): + cur_names_with_path = (name, []) + if name == 'pk': + name = opts.pk.name + try: + field, model, direct, m2m = opts.get_field_by_name(name) + except FieldDoesNotExist: + for f in opts.fields: + if allow_explicit_fk and name == f.attname: + # XXX: A hack to allow foo_id to work in values() for + # backwards compatibility purposes. If we dropped that + # feature, this could be removed. + field, model, direct, m2m = opts.get_field_by_name(f.name) + break + else: + available = opts.get_all_field_names() + list(self.aggregate_select) + raise FieldError("Cannot resolve keyword %r into field. " + "Choices are: %s" % (name, ", ".join(available))) + # Check if we need any joins for concrete inheritance cases (the + # field lives in parent, but we are currently in one of its + # children) + if model: + # The field lives on a base class of the current model. 
+ # Skip the chain of proxy to the concrete proxied model + proxied_model = opts.concrete_model + + for int_model in opts.get_base_chain(model): + if int_model is proxied_model: + opts = int_model._meta + else: + final_field = opts.parents[int_model] + targets = (final_field.rel.get_related_field(),) + opts = int_model._meta + path.append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True)) + cur_names_with_path[1].append(PathInfo(final_field.model._meta, opts, targets, final_field, False, True)) + if hasattr(field, 'get_path_info'): + pathinfos = field.get_path_info() + if not allow_many: + for inner_pos, p in enumerate(pathinfos): + if p.m2m: + cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) + names_with_path.append(cur_names_with_path) + raise MultiJoin(pos + 1, names_with_path) + last = pathinfos[-1] + path.extend(pathinfos) + final_field = last.join_field + opts = last.to_opts + targets = last.target_fields + cur_names_with_path[1].extend(pathinfos) + names_with_path.append(cur_names_with_path) + else: + # Local non-relational field. + final_field = field + targets = (field,) + break + + if pos != len(names) - 1: + if pos == len(names) - 2: + raise FieldError( + "Join on field %r not permitted. Did you misspell %r for " + "the lookup type?" % (name, names[pos + 1])) + else: + raise FieldError("Join on field %r not permitted." % name) + return path, final_field, targets + + def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, + allow_explicit_fk=False, outer_if_first=False): + """ + Compute the necessary table joins for the passage through the fields + given in 'names'. 'opts' is the Options class for the current model + (which gives the table we are starting from), 'alias' is the alias for + the table to start the joining from. + + The 'can_reuse' defines the reverse foreign key joins we can reuse. It + can be None in which case all joins are reusable or a set of aliases + that can be reused. Note that non-reverse foreign keys are always + reusable when using setup_joins(). + + If 'allow_many' is False, then any reverse foreign key seen will + generate a MultiJoin exception. + + The 'allow_explicit_fk' controls if field.attname is allowed in the + lookups. + + Returns the final field involved in the joins, the target field (used + for any 'where' constraint), the final 'opts' value, the joins and the + field path travelled to generate the joins. + + The target field is the field containing the concrete value. Final + field can be something different, for example foreign key pointing to + that value. Final field is needed for example in some value + conversions (convert 'obj' in fk__id=obj to pk val using the foreign + key field for example). + """ + joins = [alias] + # First, generate the path for the names + path, final_field, targets = self.names_to_path( + names, opts, allow_many, allow_explicit_fk) + # Then, add the path to the query's joins. Note that we can't trim + # joins at this stage - we will need the information about join type + # of the trimmed joins. 
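+ # As an example (hypothetical models), setup_joins(['author', 'name'],
+ # book_opts, 'myapp_book') adds one join to myapp_author and returns
+ # the author 'name' field as both the final field and the target.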
+ for pos, join in enumerate(path):
+ opts = join.to_opts
+ if join.direct:
+ nullable = self.is_nullable(join.join_field)
+ else:
+ nullable = True
+ connection = alias, opts.db_table, join.join_field.get_joining_columns()
+ reuse = can_reuse if join.m2m else None
+ alias = self.join(
+ connection, reuse=reuse, nullable=nullable, join_field=join.join_field,
+ outer_if_first=outer_if_first)
+ joins.append(alias)
+ if hasattr(final_field, 'field'):
+ final_field = final_field.field
+ return final_field, targets, opts, joins, path
+
+ def trim_joins(self, targets, joins, path):
+ """
+ The 'targets' parameter contains the final fields being joined to,
+ 'joins' is the full list of join aliases. The 'path' contains the
+ PathInfos used to create the joins.
+
+ Returns the final target field and table alias and the new active
+ joins.
+
+ We will always trim any direct join if we have the target column
+ available already in the previous table. Reverse joins can't be
+ trimmed as we don't know if there is anything on the other side of
+ the join.
+ """
+ for pos, info in enumerate(reversed(path)):
+ if len(joins) == 1 or not info.direct:
+ break
+ join_targets = set(t.column for t in info.join_field.foreign_related_fields)
+ cur_targets = set(t.column for t in targets)
+ if not cur_targets.issubset(join_targets):
+ break
+ targets = tuple(r[0] for r in info.join_field.related_fields if r[1].column in cur_targets)
+ self.unref_alias(joins.pop())
+ return targets, joins[-1], joins
+
+ def split_exclude(self, filter_expr, prefix, can_reuse, names_with_path):
+ """
+ When doing an exclude against any kind of N-to-many relation, we need
+ to use a subquery. This method constructs the nested query, given the
+ original exclude filter (filter_expr) and the portion up to the first
+ N-to-many relation field.
+
+ As an example, we could have the original filter ~Q(child__name='foo').
+ We would get here with filter_expr = child__name, prefix = child and
+ can_reuse is a set of joins usable for filters in the original query.
+
+ We will turn this into the equivalent of:
+ WHERE NOT (pk IN (SELECT parent_id FROM thetable
+ WHERE name = 'foo' AND parent_id IS NOT NULL))
+
+ It might be worth it to consider using WHERE NOT EXISTS as that has
+ saner null handling, and is easier for the backend's optimizer to
+ handle.
+ """
+ # Generate the inner query.
+ query = Query(self.model)
+ query.where.add(query.build_filter(filter_expr), AND)
+ query.bump_prefix()
+ query.clear_ordering(True)
+ # Keep the subquery as simple as possible -- trim leading joins from
+ # the subquery.
+ trimmed_prefix, contains_louter = query.trim_start(names_with_path)
+ query.remove_inherited_models()
+
+ # Add an extra check to make sure the selected field will not be null
+ # since we are adding an IN <subquery> clause. This prevents the
+ # database from tripping over IN (...,NULL,...) selects and returning
+ # nothing
+ if self.is_nullable(query.select[0].field):
+ alias, col = query.select[0].col
+ query.where.add((Constraint(alias, col, query.select[0].field), 'isnull', False), AND)
+
+ condition = self.build_filter(
+ ('%s__in' % trimmed_prefix, query),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ if contains_louter:
+ or_null_condition = self.build_filter(
+ ('%s__isnull' % trimmed_prefix, True),
+ current_negated=True, branch_negated=True, can_reuse=can_reuse)
+ condition.add(or_null_condition, OR)
+ # Note that the end result will be:
+ # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
+ # This might look crazy but due to how IN works, this seems to be + # correct. If the IS NOT NULL check is removed then outercol NOT + # IN will return UNKNOWN. If the IS NULL check is removed, then if + # outercol IS NULL we will not match the row. + return condition + + def set_empty(self): + self.where = EmptyWhere() + self.having = EmptyWhere() + + def is_empty(self): + return isinstance(self.where, EmptyWhere) or isinstance(self.having, EmptyWhere) + + def set_limits(self, low=None, high=None): + """ + Adjusts the limits on the rows retrieved. We use low/high to set these, + as it makes it more Pythonic to read and write. When the SQL query is + created, they are converted to the appropriate offset and limit values. + + Any limits passed in here are applied relative to the existing + constraints. So low is added to the current low value and both will be + clamped to any existing high value. + """ + if high is not None: + if self.high_mark is not None: + self.high_mark = min(self.high_mark, self.low_mark + high) + else: + self.high_mark = self.low_mark + high + if low is not None: + if self.high_mark is not None: + self.low_mark = min(self.high_mark, self.low_mark + low) + else: + self.low_mark = self.low_mark + low + + def clear_limits(self): + """ + Clears any existing limits. + """ + self.low_mark, self.high_mark = 0, None + + def can_filter(self): + """ + Returns True if adding filters to this instance is still possible. + + Typically, this means no limits or offsets have been put on the results. + """ + return not self.low_mark and self.high_mark is None + + def clear_select_clause(self): + """ + Removes all fields from SELECT clause. + """ + self.select = [] + self.default_cols = False + self.select_related = False + self.set_extra_mask(()) + self.set_aggregate_mask(()) + + def clear_select_fields(self): + """ + Clears the list of fields to select (but not extra_select columns). + Some queryset types completely replace any existing list of select + columns. + """ + self.select = [] + + def add_distinct_fields(self, *field_names): + """ + Adds and resolves the given fields to the query's "distinct on" clause. + """ + self.distinct_fields = field_names + self.distinct = True + + def add_fields(self, field_names, allow_m2m=True): + """ + Adds the given (model) fields to the select set. The field names are + added in the order specified. + """ + alias = self.get_initial_alias() + opts = self.get_meta() + + try: + for name in field_names: + field, targets, u2, joins, path = self.setup_joins( + name.split(LOOKUP_SEP), opts, alias, None, allow_m2m, + allow_explicit_fk=True, outer_if_first=True) + + # Trim last join if possible + targets, final_alias, remaining_joins = self.trim_joins(targets, joins[-2:], path) + joins = joins[:-2] + remaining_joins + + self.promote_joins(joins[1:]) + for target in targets: + self.select.append(SelectInfo((final_alias, target.column), target)) + except MultiJoin: + raise FieldError("Invalid field name: '%s'" % name) + except FieldError: + if LOOKUP_SEP in name: + # For lookups spanning over relationships, show the error + # from the model on which the lookup failed. + raise + else: + names = sorted(opts.get_all_field_names() + list(self.extra) + + list(self.aggregate_select)) + raise FieldError("Cannot resolve keyword %r into field. " + "Choices are: %s" % (name, ", ".join(names))) + self.remove_inherited_models() + + def add_ordering(self, *ordering): + """ + Adds items from the 'ordering' sequence to the query's "order by" + clause. 
These items are either field names (not column names) -- + possibly with a direction prefix ('-' or '?') -- or ordinals, + corresponding to column positions in the 'select' list. + + If 'ordering' is empty, all ordering is cleared from the query. + """ + errors = [] + for item in ordering: + if not ORDER_PATTERN.match(item): + errors.append(item) + if errors: + raise FieldError('Invalid order_by arguments: %s' % errors) + if ordering: + self.order_by.extend(ordering) + else: + self.default_ordering = False + + def clear_ordering(self, force_empty): + """ + Removes any ordering settings. If 'force_empty' is True, there will be + no ordering in the resulting query (not even the model's default). + """ + self.order_by = [] + self.extra_order_by = () + if force_empty: + self.default_ordering = False + + def set_group_by(self): + """ + Expands the GROUP BY clause required by the query. + + This will usually be the set of all non-aggregate fields in the + return data. If the database backend supports grouping by the + primary key, and the query would be equivalent, the optimization + will be made automatically. + """ + self.group_by = [] + + for col, _ in self.select: + self.group_by.append(col) + + def add_count_column(self): + """ + Converts the query to do count(...) or count(distinct(pk)) in order to + get its size. + """ + if not self.distinct: + if not self.select: + count = self.aggregates_module.Count('*', is_summary=True) + else: + assert len(self.select) == 1, \ + "Cannot add count col with multiple cols in 'select': %r" % self.select + count = self.aggregates_module.Count(self.select[0].col) + else: + opts = self.get_meta() + if not self.select: + count = self.aggregates_module.Count( + (self.join((None, opts.db_table, None)), opts.pk.column), + is_summary=True, distinct=True) + else: + # Because of SQL portability issues, multi-column, distinct + # counts need a sub-query -- see get_count() for details. + assert len(self.select) == 1, \ + "Cannot add count col with multiple cols in 'select'." + + count = self.aggregates_module.Count(self.select[0].col, distinct=True) + # Distinct handling is done in Count(), so don't do it at this + # level. + self.distinct = False + + # Set only aggregate to be the count column. + # Clear out the select cache to reflect the new unmasked aggregates. + self.aggregates = {None: count} + self.set_aggregate_mask(None) + self.group_by = None + + def add_select_related(self, fields): + """ + Sets up the select_related data structure so that we only select + certain related models (as opposed to all models, when + self.select_related=True). + """ + field_dict = {} + for field in fields: + d = field_dict + for part in field.split(LOOKUP_SEP): + d = d.setdefault(part, {}) + self.select_related = field_dict + self.related_select_cols = [] + + def add_extra(self, select, select_params, where, params, tables, order_by): + """ + Adds data to the various extra_* attributes for user-created additions + to the query. + """ + if select: + # We need to pair any placeholder markers in the 'select' + # dictionary with their parameters in 'select_params' so that + # subsequent updates to the select dictionary also adjust the + # parameters appropriately. 
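+ # For instance, extra(select={'is_recent': "pub_date > %s"},
+ # select_params=(some_date,)) pairs the single '%s' in the snippet
+ # with some_date below (illustrative column and parameter names).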
+ select_pairs = SortedDict()
+ if select_params:
+ param_iter = iter(select_params)
+ else:
+ param_iter = iter([])
+ for name, entry in select.items():
+ entry = force_text(entry)
+ entry_params = []
+ pos = entry.find("%s")
+ while pos != -1:
+ entry_params.append(next(param_iter))
+ pos = entry.find("%s", pos + 2)
+ select_pairs[name] = (entry, entry_params)
+ # This is order preserving, since self.extra is a SortedDict.
+ self.extra.update(select_pairs)
+ if where or params:
+ self.where.add(ExtraWhere(where, params), AND)
+ if tables:
+ self.extra_tables += tuple(tables)
+ if order_by:
+ self.extra_order_by = order_by
+
+ def clear_deferred_loading(self):
+ """
+ Remove any fields from the deferred loading set.
+ """
+ self.deferred_loading = (set(), True)
+
+ def add_deferred_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ exclude from loading from the database when automatic column selection
+ is done. The new field names are added to any existing field names that
+ are deferred (or removed from any existing field names that are marked
+ as the only ones for immediate loading).
+ """
+ # Fields on related models are stored in the literal double-underscore
+ # format, so that we can use a set datastructure. We do the foo__bar
+ # splitting and handling when computing the SQL column names (as part of
+ # get_columns()).
+ existing, defer = self.deferred_loading
+ if defer:
+ # Add to existing deferred names.
+ self.deferred_loading = existing.union(field_names), True
+ else:
+ # Remove names from the set of any existing "immediate load" names.
+ self.deferred_loading = existing.difference(field_names), False
+
+ def add_immediate_loading(self, field_names):
+ """
+ Add the given list of model field names to the set of fields to
+ retrieve when the SQL is executed ("immediate loading" fields). The
+ field names replace any existing immediate loading field names. If
+ there are field names already specified for deferred loading, those
+ names are removed from the new field_names before storing the new names
+ for immediate loading. (That is, immediate loading overrides any
+ existing immediate values, but respects existing deferrals.)
+ """
+ existing, defer = self.deferred_loading
+ field_names = set(field_names)
+ if 'pk' in field_names:
+ field_names.remove('pk')
+ field_names.add(self.get_meta().pk.name)
+
+ if defer:
+ # Remove any existing deferred names from the current set before
+ # setting the new names.
+ self.deferred_loading = field_names.difference(existing), False
+ else:
+ # Replace any existing "immediate load" field names.
+ self.deferred_loading = field_names, False
+
+ def get_loaded_field_names(self):
+ """
+ If any fields are marked to be deferred, returns a dictionary mapping
+ models to the set of field names that will be loaded. If a
+ model is not in the returned dictionary, none of its fields are
+ deferred.
+
+ If no fields are marked for deferral, returns an empty dictionary.
+ """
+ # We cache this because we call this function multiple times
+ # (compiler.fill_related_selections, query.iterator)
+ try:
+ return self._loaded_field_names_cache
+ except AttributeError:
+ collection = {}
+ self.deferred_to_data(collection, self.get_loaded_field_names_cb)
+ self._loaded_field_names_cache = collection
+ return collection
+
+ def get_loaded_field_names_cb(self, target, model, fields):
+ """
+ Callback used by get_loaded_field_names().
+ """ + target[model] = set([f.name for f in fields]) + + def set_aggregate_mask(self, names): + "Set the mask of aggregates that will actually be returned by the SELECT" + if names is None: + self.aggregate_select_mask = None + else: + self.aggregate_select_mask = set(names) + self._aggregate_select_cache = None + + def set_extra_mask(self, names): + """ + Set the mask of extra select items that will be returned by SELECT, + we don't actually remove them from the Query since they might be used + later + """ + if names is None: + self.extra_select_mask = None + else: + self.extra_select_mask = set(names) + self._extra_select_cache = None + + def _aggregate_select(self): + """The SortedDict of aggregate columns that are not masked, and should + be used in the SELECT clause. + + This result is cached for optimization purposes. + """ + if self._aggregate_select_cache is not None: + return self._aggregate_select_cache + elif self.aggregate_select_mask is not None: + self._aggregate_select_cache = SortedDict([ + (k,v) for k,v in self.aggregates.items() + if k in self.aggregate_select_mask + ]) + return self._aggregate_select_cache + else: + return self.aggregates + aggregate_select = property(_aggregate_select) + + def _extra_select(self): + if self._extra_select_cache is not None: + return self._extra_select_cache + elif self.extra_select_mask is not None: + self._extra_select_cache = SortedDict([ + (k,v) for k,v in self.extra.items() + if k in self.extra_select_mask + ]) + return self._extra_select_cache + else: + return self.extra + extra_select = property(_extra_select) + + def trim_start(self, names_with_path): + """ + Trims joins from the start of the join path. The candidates for trim + are the PathInfos in names_with_path structure that are m2m joins. + + Also sets the select column so the start matches the join. + + This method is meant to be used for generating the subquery joins & + cols in split_exclude(). + + Returns a lookup usable for doing outerq.filter(lookup=self). Returns + also if the joins in the prefix contain a LEFT OUTER join. + _""" + all_paths = [] + for _, paths in names_with_path: + all_paths.extend(paths) + contains_louter = False + for pos, path in enumerate(all_paths): + if path.m2m: + break + if self.alias_map[self.tables[pos + 1]].join_type == self.LOUTER: + contains_louter = True + self.unref_alias(self.tables[pos]) + # The path.join_field is a Rel, lets get the other side's field + join_field = path.join_field.field + # Build the filter prefix. + trimmed_prefix = [] + paths_in_prefix = pos + for name, path in names_with_path: + if paths_in_prefix - len(path) < 0: + break + trimmed_prefix.append(name) + paths_in_prefix -= len(path) + trimmed_prefix.append( + join_field.foreign_related_fields[0].name) + trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) + # Lets still see if we can trim the first join from the inner query + # (that is, self). We can't do this for LEFT JOINs because we would + # miss those rows that have nothing on the outer side. 
+        if self.alias_map[self.tables[pos + 1]].join_type != self.LOUTER:
+            select_fields = [r[0] for r in join_field.related_fields]
+            select_alias = self.tables[pos + 1]
+            self.unref_alias(self.tables[pos])
+            extra_restriction = join_field.get_extra_restriction(
+                self.where_class, None, self.tables[pos + 1])
+            if extra_restriction:
+                self.where.add(extra_restriction, AND)
+        else:
+            # TODO: It might be possible to trim more joins from the start of
+            # the inner query if it happens to have a longer join chain
+            # containing the values in select_fields. Let's punt this one for
+            # now.
+            select_fields = [r[1] for r in join_field.related_fields]
+            select_alias = self.tables[pos]
+        self.select = [SelectInfo((select_alias, f.column), f) for f in select_fields]
+        return trimmed_prefix, contains_louter
+
+    def is_nullable(self, field):
+        """
+        A helper to check if the given field should be treated as nullable.
+
+        Some backends treat '' as null and Django treats such fields as
+        nullable for those backends. In such situations field.null can be
+        False even if we should treat the field as nullable.
+        """
+        # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
+        # (nor should it have) knowledge of which connection is going to be
+        # used. The proper fix would be to defer all decisions where
+        # is_nullable() is needed to the compiler stage, but that is not easy
+        # to do currently.
+        if ((connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls)
+                and field.empty_strings_allowed):
+            return True
+        else:
+            return field.null
+
+def get_order_dir(field, default='ASC'):
+    """
+    Returns the field name and direction for an order specification. For
+    example, '-foo' is returned as ('foo', 'DESC').
+
+    The 'default' param is used to indicate which way no prefix (or a '+'
+    prefix) should sort. The '-' prefix always sorts the opposite way.
+    """
+    dirn = ORDER_DIR[default]
+    if field[0] == '-':
+        return field[1:], dirn[1]
+    return field, dirn[0]
+
+
+def add_to_dict(data, key, value):
+    """
+    A helper function to add "value" to the set of values for "key", whether
+    or not "key" already exists.
+    """
+    if key in data:
+        data[key].add(value)
+    else:
+        data[key] = set([value])
+
+def is_reverse_o2o(field):
+    """
+    A little helper to check if the given field is reverse-o2o. The field is
+    expected to be some sort of relation field or related object.
+    """
+    return not hasattr(field, 'rel') and field.field.unique
+
+def alias_diff(refcounts_before, refcounts_after):
+    """
+    Given the before and after copies of refcounts, works out which aliases
+    have been added to the after copy.
+    """
+    # Use -1 as default value so that any join that is created, then trimmed
+    # is seen as added.
+    return set(t for t in refcounts_after
+               if refcounts_after[t] > refcounts_before.get(t, -1))
diff --git a/lib/python2.7/site-packages/django/db/models/sql/subqueries.py b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
new file mode 100644
index 0000000..6dc0005
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/subqueries.py
@@ -0,0 +1,297 @@
+"""
+Query subclasses which provide extra functionality beyond simple data retrieval.
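+
+These include DeleteQuery, UpdateQuery, InsertQuery, DateQuery,
+DateTimeQuery and AggregateQuery.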
+""" + +from django.conf import settings +from django.core.exceptions import FieldError +from django.db import connections +from django.db.models.constants import LOOKUP_SEP +from django.db.models.fields import DateField, DateTimeField, FieldDoesNotExist +from django.db.models.sql.constants import * +from django.db.models.sql.datastructures import Date, DateTime +from django.db.models.sql.query import Query +from django.db.models.sql.where import AND, Constraint +from django.utils.functional import Promise +from django.utils.encoding import force_text +from django.utils import six +from django.utils import timezone + + +__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery', + 'DateTimeQuery', 'AggregateQuery'] + +class DeleteQuery(Query): + """ + Delete queries are done through this class, since they are more constrained + than general queries. + """ + + compiler = 'SQLDeleteCompiler' + + def do_query(self, table, where, using): + self.tables = [table] + self.where = where + self.get_compiler(using).execute_sql(None) + + def delete_batch(self, pk_list, using, field=None): + """ + Set up and execute delete queries for all the objects in pk_list. + + More than one physical query may be executed if there are a + lot of values in pk_list. + """ + if not field: + field = self.get_meta().pk + for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): + where = self.where_class() + where.add((Constraint(None, field.column, field), 'in', + pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND) + self.do_query(self.get_meta().db_table, where, using=using) + + def delete_qs(self, query, using): + """ + Delete the queryset in one SQL query (if possible). For simple queries + this is done by copying the query.query.where to self.query, for + complex queries by using subquery. + """ + innerq = query.query + # Make sure the inner query has at least one table in use. + innerq.get_initial_alias() + # The same for our new query. + self.get_initial_alias() + innerq_used_tables = [t for t in innerq.tables + if innerq.alias_refcount[t]] + if ((not innerq_used_tables or innerq_used_tables == self.tables) + and not len(innerq.having)): + # There is only the base table in use in the query, and there are + # no aggregate filtering going on. + self.where = innerq.where + else: + pk = query.model._meta.pk + if not connections[using].features.update_can_self_select: + # We can't do the delete using subquery. + values = list(query.values_list('pk', flat=True)) + if not values: + return + self.delete_batch(values, using) + return + else: + innerq.clear_select_clause() + innerq.select = [SelectInfo((self.get_initial_alias(), pk.column), None)] + values = innerq + where = self.where_class() + where.add((Constraint(None, pk.column, pk), 'in', values), AND) + self.where = where + self.get_compiler(using).execute_sql(None) + + +class UpdateQuery(Query): + """ + Represents an "update" SQL query. + """ + + compiler = 'SQLUpdateCompiler' + + def __init__(self, *args, **kwargs): + super(UpdateQuery, self).__init__(*args, **kwargs) + self._setup_query() + + def _setup_query(self): + """ + Runs on initialization and after cloning. Any attributes that would + normally be set in __init__ should go in here, instead, so that they + are also set up after a clone() call. 
+ """ + self.values = [] + self.related_ids = None + if not hasattr(self, 'related_updates'): + self.related_updates = {} + + def clone(self, klass=None, **kwargs): + return super(UpdateQuery, self).clone(klass, + related_updates=self.related_updates.copy(), **kwargs) + + def update_batch(self, pk_list, values, using): + pk_field = self.get_meta().pk + self.add_update_values(values) + for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): + self.where = self.where_class() + self.where.add((Constraint(None, pk_field.column, pk_field), 'in', + pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), + AND) + self.get_compiler(using).execute_sql(None) + + def add_update_values(self, values): + """ + Convert a dictionary of field name to value mappings into an update + query. This is the entry point for the public update() method on + querysets. + """ + values_seq = [] + for name, val in six.iteritems(values): + field, model, direct, m2m = self.get_meta().get_field_by_name(name) + if not direct or m2m: + raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field) + if model: + self.add_related_update(model, field, val) + continue + values_seq.append((field, model, val)) + return self.add_update_fields(values_seq) + + def add_update_fields(self, values_seq): + """ + Turn a sequence of (field, model, value) triples into an update query. + Used by add_update_values() as well as the "fast" update path when + saving models. + """ + # Check that no Promise object passes to the query. Refs #10498. + values_seq = [(value[0], value[1], force_text(value[2])) + if isinstance(value[2], Promise) else value + for value in values_seq] + self.values.extend(values_seq) + + def add_related_update(self, model, field, value): + """ + Adds (name, value) to an update query for an ancestor model. + + Updates are coalesced so that we only run one update query per ancestor. + """ + try: + self.related_updates[model].append((field, None, value)) + except KeyError: + self.related_updates[model] = [(field, None, value)] + + def get_related_updates(self): + """ + Returns a list of query objects: one for each update required to an + ancestor model. Each query will have the same filtering conditions as + the current query but will only update a single table. + """ + if not self.related_updates: + return [] + result = [] + for model, values in six.iteritems(self.related_updates): + query = UpdateQuery(model) + query.values = values + if self.related_ids is not None: + query.add_filter(('pk__in', self.related_ids)) + result.append(query) + return result + +class InsertQuery(Query): + compiler = 'SQLInsertCompiler' + + def __init__(self, *args, **kwargs): + super(InsertQuery, self).__init__(*args, **kwargs) + self.fields = [] + self.objs = [] + + def clone(self, klass=None, **kwargs): + extras = { + 'fields': self.fields[:], + 'objs': self.objs[:], + 'raw': self.raw, + } + extras.update(kwargs) + return super(InsertQuery, self).clone(klass, **extras) + + def insert_values(self, fields, objs, raw=False): + """ + Set up the insert query from the 'insert_values' dictionary. The + dictionary gives the model field names and their target values. + + If 'raw_values' is True, the values in the 'insert_values' dictionary + are inserted directly into the query, rather than passed as SQL + parameters. This provides a way to insert NULL and DEFAULT keywords + into the query, for example. + """ + self.fields = fields + # Check that no Promise object reaches the DB. Refs #10498. 
+        for field in fields:
+            for obj in objs:
+                value = getattr(obj, field.attname)
+                if isinstance(value, Promise):
+                    setattr(obj, field.attname, force_text(value))
+        self.objs = objs
+        self.raw = raw
+
+class DateQuery(Query):
+    """
+    A DateQuery is a normal query, except that it specifically selects a
+    single date field. This requires some special handling when converting
+    the results back to Python objects, so we put it in a separate class.
+    """
+
+    compiler = 'SQLDateCompiler'
+
+    def add_select(self, field_name, lookup_type, order='ASC'):
+        """
+        Converts the query into an extraction query.
+        """
+        try:
+            result = self.setup_joins(
+                field_name.split(LOOKUP_SEP),
+                self.get_meta(),
+                self.get_initial_alias(),
+            )
+        except FieldError:
+            raise FieldDoesNotExist("%s has no field named '%s'" % (
+                self.get_meta().object_name, field_name
+            ))
+        field = result[0]
+        self._check_field(field)  # overridden in DateTimeQuery
+        alias = result[3][-1]
+        select = self._get_select((alias, field.column), lookup_type)
+        self.clear_select_clause()
+        self.select = [SelectInfo(select, None)]
+        self.distinct = True
+        self.order_by = [1] if order == 'ASC' else [-1]
+
+        if field.null:
+            self.add_filter(("%s__isnull" % field_name, False))
+
+    def _check_field(self, field):
+        assert isinstance(field, DateField), \
+            "%r isn't a DateField." % field.name
+        if settings.USE_TZ:
+            assert not isinstance(field, DateTimeField), \
+                "%r is a DateTimeField, not a DateField." % field.name
+
+    def _get_select(self, col, lookup_type):
+        return Date(col, lookup_type)
+
+class DateTimeQuery(DateQuery):
+    """
+    A DateTimeQuery is like a DateQuery but for a datetime field. If time zone
+    support is active, the tzinfo attribute contains the time zone to use for
+    converting the values before truncating them. Otherwise it's set to None.
+    """
+
+    compiler = 'SQLDateTimeCompiler'
+
+    def clone(self, klass=None, memo=None, **kwargs):
+        if 'tzinfo' not in kwargs and hasattr(self, 'tzinfo'):
+            kwargs['tzinfo'] = self.tzinfo
+        return super(DateTimeQuery, self).clone(klass, memo, **kwargs)
+
+    def _check_field(self, field):
+        assert isinstance(field, DateTimeField), \
+            "%r isn't a DateTimeField." % field.name
+
+    def _get_select(self, col, lookup_type):
+        if self.tzinfo is None:
+            tzname = None
+        else:
+            tzname = timezone._get_timezone_name(self.tzinfo)
+        return DateTime(col, lookup_type, tzname)
+
+class AggregateQuery(Query):
+    """
+    An AggregateQuery takes another query as a parameter to the FROM
+    clause and only selects the elements in the provided list.
+    """
+
+    compiler = 'SQLAggregateCompiler'
+
+    def add_subquery(self, query, using):
+        self.subquery, self.sub_params = query.get_compiler(using).as_sql(with_col_aliases=True)
diff --git a/lib/python2.7/site-packages/django/db/models/sql/where.py b/lib/python2.7/site-packages/django/db/models/sql/where.py
new file mode 100644
index 0000000..2a342d4
--- /dev/null
+++ b/lib/python2.7/site-packages/django/db/models/sql/where.py
@@ -0,0 +1,419 @@
+"""
+Code to manage the creation and SQL rendering of 'where' constraints.
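+
+The central class is WhereNode, a tree of constraints that knows how to
+render itself as SQL.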
+""" + +from __future__ import absolute_import + +import datetime +from itertools import repeat + +from django.conf import settings +from django.db.models.fields import DateTimeField, Field +from django.db.models.sql.datastructures import EmptyResultSet, Empty +from django.db.models.sql.aggregates import Aggregate +from django.utils.itercompat import is_iterator +from django.utils.six.moves import xrange +from django.utils import timezone +from django.utils import tree + +# Connection types +AND = 'AND' +OR = 'OR' + +class EmptyShortCircuit(Exception): + """ + Internal exception used to indicate that a "matches nothing" node should be + added to the where-clause. + """ + pass + +class WhereNode(tree.Node): + """ + Used to represent the SQL where-clause. + + The class is tied to the Query class that created it (in order to create + the correct SQL). + + A child is usually a tuple of: + (Constraint(alias, targetcol, field), lookup_type, value) + where value can be either raw Python value, or Query, ExpressionNode or + something else knowing how to turn itself into SQL. + + However, a child could also be any class with as_sql() and either + relabeled_clone() method or relabel_aliases() and clone() methods. The + second alternative should be used if the alias is not the only mutable + variable. + """ + default = AND + + def _prepare_data(self, data): + """ + Prepare data for addition to the tree. If the data is a list or tuple, + it is expected to be of the form (obj, lookup_type, value), where obj + is a Constraint object, and is then slightly munged before being + stored (to avoid storing any reference to field objects). Otherwise, + the 'data' is stored unchanged and can be any class with an 'as_sql()' + method. + """ + if not isinstance(data, (list, tuple)): + return data + obj, lookup_type, value = data + if is_iterator(value): + # Consume any generators immediately, so that we can determine + # emptiness and transform any non-empty values correctly. + value = list(value) + + # The "value_annotation" parameter is used to pass auxilliary information + # about the value(s) to the query construction. Specifically, datetime + # and empty values need special handling. Other types could be used + # here in the future (using Python types is suggested for consistency). + if (isinstance(value, datetime.datetime) + or (isinstance(obj.field, DateTimeField) and lookup_type != 'isnull')): + value_annotation = datetime.datetime + elif hasattr(value, 'value_annotation'): + value_annotation = value.value_annotation + else: + value_annotation = bool(value) + + if hasattr(obj, "prepare"): + value = obj.prepare(lookup_type, value) + return (obj, lookup_type, value_annotation, value) + + def as_sql(self, qn, connection): + """ + Returns the SQL version of the where clause and the value to be + substituted in. Returns '', [] if this node matches everything, + None, [] if this node is empty, and raises EmptyResultSet if this + node can't match anything. + """ + # Note that the logic here is made slightly more complex than + # necessary because there are two kind of empty nodes: Nodes + # containing 0 children, and nodes that are known to match everything. + # A match-everything node is different than empty node (which also + # technically matches everything) for backwards compatibility reasons. + # Refs #5261. 
+        result = []
+        result_params = []
+        everything_childs, nothing_childs = 0, 0
+        non_empty_childs = len(self.children)
+
+        for child in self.children:
+            try:
+                if hasattr(child, 'as_sql'):
+                    sql, params = child.as_sql(qn=qn, connection=connection)
+                else:
+                    # A leaf node in the tree.
+                    sql, params = self.make_atom(child, qn, connection)
+            except EmptyResultSet:
+                nothing_childs += 1
+            else:
+                if sql:
+                    result.append(sql)
+                    result_params.extend(params)
+                else:
+                    if sql is None:
+                        # Skip empty children entirely.
+                        non_empty_childs -= 1
+                        continue
+                    everything_childs += 1
+        # Check if this node matches nothing or everything.
+        # First compute how many full and empty children are needed to make
+        # this node empty/full.
+        if self.connector == AND:
+            full_needed, empty_needed = non_empty_childs, 1
+        else:
+            full_needed, empty_needed = 1, non_empty_childs
+        # Now, check if this node is full/empty using the counts.
+        if empty_needed - nothing_childs <= 0:
+            if self.negated:
+                return '', []
+            else:
+                raise EmptyResultSet
+        if full_needed - everything_childs <= 0:
+            if self.negated:
+                raise EmptyResultSet
+            else:
+                return '', []
+
+        if non_empty_childs == 0:
+            # All the child nodes were empty, so this one is empty, too.
+            return None, []
+        conn = ' %s ' % self.connector
+        sql_string = conn.join(result)
+        if sql_string:
+            if self.negated:
+                # Some backends (Oracle at least) need parentheses
+                # around the inner SQL in the negated case, even if the
+                # inner SQL contains just a single expression.
+                sql_string = 'NOT (%s)' % sql_string
+            elif len(result) > 1:
+                sql_string = '(%s)' % sql_string
+        return sql_string, result_params
+
+    def get_cols(self):
+        cols = []
+        for child in self.children:
+            if hasattr(child, 'get_cols'):
+                cols.extend(child.get_cols())
+            else:
+                if isinstance(child[0], Constraint):
+                    cols.append((child[0].alias, child[0].col))
+                if hasattr(child[3], 'get_cols'):
+                    cols.extend(child[3].get_cols())
+        return cols
+
+    def make_atom(self, child, qn, connection):
+        """
+        Turn a tuple (Constraint(table_alias, column_name, db_type),
+        lookup_type, value_annotation, params) into valid SQL.
+
+        The first item of the tuple may also be an Aggregate.
+
+        Returns the string for the SQL fragment and the parameters to use for
+        it.
+        """
+        lvalue, lookup_type, value_annotation, params_or_value = child
+        field_internal_type = lvalue.field.get_internal_type() if lvalue.field else None
+
+        if isinstance(lvalue, Constraint):
+            try:
+                lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
+            except EmptyShortCircuit:
+                raise EmptyResultSet
+        elif isinstance(lvalue, Aggregate):
+            params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
+        else:
+            raise TypeError("'make_atom' expects a Constraint or an Aggregate "
+                            "as the first item of its 'child' argument.")
+
+        if isinstance(lvalue, tuple):
+            # A direct database column lookup.
+            field_sql, field_params = self.sql_for_columns(lvalue, qn, connection, field_internal_type), []
+        else:
+            # A smart object with an as_sql() method.
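+            # (Illustrative: an Aggregate lvalue reaches this branch and
+            # renders itself, producing SQL along the lines of "MAX(T1.id)".)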
+            field_sql, field_params = lvalue.as_sql(qn, connection)
+
+        is_datetime_field = value_annotation is datetime.datetime
+        cast_sql = connection.ops.datetime_cast_sql() if is_datetime_field else '%s'
+
+        if hasattr(params, 'as_sql'):
+            extra, params = params.as_sql(qn, connection)
+            cast_sql = ''
+        else:
+            extra = ''
+
+        params = field_params + params
+
+        if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
+                and connection.features.interprets_empty_strings_as_nulls):
+            lookup_type = 'isnull'
+            value_annotation = True
+
+        if lookup_type in connection.operators:
+            format = "%s %%s %%s" % (connection.ops.lookup_cast(lookup_type),)
+            return (format % (field_sql,
+                              connection.operators[lookup_type] % cast_sql,
+                              extra), params)
+
+        if lookup_type == 'in':
+            if not value_annotation:
+                raise EmptyResultSet
+            if extra:
+                return ('%s IN %s' % (field_sql, extra), params)
+            max_in_list_size = connection.ops.max_in_list_size()
+            if max_in_list_size and len(params) > max_in_list_size:
+                # Break up the params list into an OR of manageable chunks.
+                in_clause_elements = ['(']
+                for offset in xrange(0, len(params), max_in_list_size):
+                    if offset > 0:
+                        in_clause_elements.append(' OR ')
+                    in_clause_elements.append('%s IN (' % field_sql)
+                    group_size = min(len(params) - offset, max_in_list_size)
+                    param_group = ', '.join(repeat('%s', group_size))
+                    in_clause_elements.append(param_group)
+                    in_clause_elements.append(')')
+                in_clause_elements.append(')')
+                return ''.join(in_clause_elements), params
+            else:
+                return ('%s IN (%s)' % (field_sql,
+                                        ', '.join(repeat('%s', len(params)))),
+                        params)
+        elif lookup_type in ('range', 'year'):
+            return ('%s BETWEEN %%s and %%s' % field_sql, params)
+        elif is_datetime_field and lookup_type in ('month', 'day', 'week_day',
+                                                   'hour', 'minute', 'second'):
+            tzname = timezone.get_current_timezone_name() if settings.USE_TZ else None
+            sql, tz_params = connection.ops.datetime_extract_sql(lookup_type, field_sql, tzname)
+            return ('%s = %%s' % sql, tz_params + params)
+        elif lookup_type in ('month', 'day', 'week_day'):
+            return ('%s = %%s'
+                    % connection.ops.date_extract_sql(lookup_type, field_sql), params)
+        elif lookup_type == 'isnull':
+            assert value_annotation in (True, False), "Invalid value_annotation for isnull"
+            return ('%s IS %sNULL' % (field_sql, ('' if value_annotation else 'NOT ')), ())
+        elif lookup_type == 'search':
+            return (connection.ops.fulltext_search_sql(field_sql), params)
+        elif lookup_type in ('regex', 'iregex'):
+            return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
+
+        raise TypeError('Invalid lookup_type: %r' % lookup_type)
+
+    def sql_for_columns(self, data, qn, connection, internal_type=None):
+        """
+        Returns the SQL fragment used for the left-hand side of a column
+        constraint (for example, the "T1.foo" portion in the clause
+        "WHERE ... T1.foo = 6").
+        """
+        table_alias, name, db_type = data
+        if table_alias:
+            lhs = '%s.%s' % (qn(table_alias), qn(name))
+        else:
+            lhs = qn(name)
+        return connection.ops.field_cast_sql(db_type, internal_type) % lhs
+
+    def relabel_aliases(self, change_map):
+        """
+        Relabels the alias values of any children. 'change_map' is a
+        dictionary mapping old (current) alias values to the new values.
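+
+        For example (illustrative): change_map={'T4': 'T5'} rewrites any
+        child referencing alias T4 to reference T5 instead.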
+ """ + for pos, child in enumerate(self.children): + if hasattr(child, 'relabel_aliases'): + # For example another WhereNode + child.relabel_aliases(change_map) + elif isinstance(child, (list, tuple)): + # tuple starting with Constraint + child = (child[0].relabeled_clone(change_map),) + child[1:] + if hasattr(child[3], 'relabeled_clone'): + child = (child[0], child[1], child[2]) + ( + child[3].relabeled_clone(change_map),) + self.children[pos] = child + + def clone(self): + """ + Creates a clone of the tree. Must only be called on root nodes (nodes + with empty subtree_parents). Childs must be either (Contraint, lookup, + value) tuples, or objects supporting .clone(). + """ + clone = self.__class__._new_instance( + children=[], connector=self.connector, negated=self.negated) + for child in self.children: + if hasattr(child, 'clone'): + clone.children.append(child.clone()) + else: + clone.children.append(child) + return clone + +class EmptyWhere(WhereNode): + + def add(self, data, connector): + return + + def as_sql(self, qn=None, connection=None): + raise EmptyResultSet + +class EverythingNode(object): + """ + A node that matches everything. + """ + + def as_sql(self, qn=None, connection=None): + return '', [] + + +class NothingNode(object): + """ + A node that matches nothing. + """ + def as_sql(self, qn=None, connection=None): + raise EmptyResultSet + + +class ExtraWhere(object): + def __init__(self, sqls, params): + self.sqls = sqls + self.params = params + + def as_sql(self, qn=None, connection=None): + sqls = ["(%s)" % sql for sql in self.sqls] + return " AND ".join(sqls), list(self.params or ()) + + +class Constraint(object): + """ + An object that can be passed to WhereNode.add() and knows how to + pre-process itself prior to including in the WhereNode. + """ + def __init__(self, alias, col, field): + self.alias, self.col, self.field = alias, col, field + + def prepare(self, lookup_type, value): + if self.field: + return self.field.get_prep_lookup(lookup_type, value) + return value + + def process(self, lookup_type, value, connection): + """ + Returns a tuple of data suitable for inclusion in a WhereNode + instance. + """ + # Because of circular imports, we need to import this here. + from django.db.models.base import ObjectDoesNotExist + try: + if self.field: + params = self.field.get_db_prep_lookup(lookup_type, value, + connection=connection, prepared=True) + db_type = self.field.db_type(connection=connection) + else: + # This branch is used at times when we add a comparison to NULL + # (we don't really want to waste time looking up the associated + # field object at the calling location). 
+                params = Field().get_db_prep_lookup(lookup_type, value,
+                    connection=connection, prepared=True)
+                db_type = None
+        except ObjectDoesNotExist:
+            raise EmptyShortCircuit
+
+        return (self.alias, self.col, db_type), params
+
+    def relabeled_clone(self, change_map):
+        if self.alias not in change_map:
+            return self
+        else:
+            new = Empty()
+            new.__class__ = self.__class__
+            new.alias, new.col, new.field = change_map[self.alias], self.col, self.field
+            return new
+
+class SubqueryConstraint(object):
+    def __init__(self, alias, columns, targets, query_object):
+        self.alias = alias
+        self.columns = columns
+        self.targets = targets
+        self.query_object = query_object
+
+    def as_sql(self, qn, connection):
+        query = self.query_object
+
+        # A QuerySet was passed in.
+        if hasattr(query, 'values'):
+            if query._db and connection.alias != query._db:
+                raise ValueError("Can't do subqueries with queries on different DBs.")
+            # Do not override already existing values.
+            if not hasattr(query, 'field_names'):
+                query = query.values(*self.targets)
+            else:
+                query = query._clone()
+            query = query.query
+            query.clear_ordering(True)
+
+        query_compiler = query.get_compiler(connection=connection)
+        return query_compiler.as_subquery_condition(self.alias, self.columns, qn)
+
+    def relabel_aliases(self, change_map):
+        self.alias = change_map.get(self.alias, self.alias)
+
+    def clone(self):
+        return self.__class__(
+            self.alias, self.columns, self.targets,
+            self.query_object)
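+
+# Illustrative usage sketch (not part of Django; 'Entry' is a hypothetical
+# model): QuerySet.extra() feeds its 'where' strings into ExtraWhere via
+# Query.add_extra(), e.g.
+#
+#     Entry.objects.extra(where=["id IN (3, 4, 5)"])
+#
+# renders as "(id IN (3, 4, 5))" AND-ed into the query's WhereNode.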