Merge branch 'master' of github.com:MongoEngine/mongoengine into fix_baselist_marked_changed_bug
mongoengine/__init__.py
@@ -23,7 +23,7 @@ __all__ = (list(document.__all__) + list(fields.__all__) +
            list(signals.__all__) + list(errors.__all__))


-VERSION = (0, 15, 3)
+VERSION = (0, 16, 3)


 def get_version():
mongoengine/base/common.py
@@ -3,10 +3,10 @@ from mongoengine.errors import NotRegistered
 __all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')


-UPDATE_OPERATORS = set(['set', 'unset', 'inc', 'dec', 'mul',
-                        'pop', 'push', 'push_all', 'pull',
-                        'pull_all', 'add_to_set', 'set_on_insert',
-                        'min', 'max', 'rename'])
+UPDATE_OPERATORS = {'set', 'unset', 'inc', 'dec', 'mul',
+                    'pop', 'push', 'push_all', 'pull',
+                    'pull_all', 'add_to_set', 'set_on_insert',
+                    'min', 'max', 'rename'}


 _document_registry = {}
@@ -19,7 +19,7 @@ def get_document(name):
         # Possible old style name
         single_end = name.split('.')[-1]
         compound_end = '.%s' % single_end
-        possible_match = [k for k in _document_registry.keys()
+        possible_match = [k for k in _document_registry
                           if k.endswith(compound_end) or k == single_end]
         if len(possible_match) == 1:
             doc = _document_registry.get(possible_match.pop(), None)
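
Two of the rewrites above are pure modernization with identical behavior: a set literal replaces the `set([...])` call, and iterating a dict directly replaces `.keys()`. A minimal sketch of the equivalences relied on (plain Python; the names are illustrative):

    # set literal vs. set() call, and direct dict iteration vs. .keys()
    assert {'set', 'unset'} == set(['set', 'unset'])
    registry = {'User': object, 'Blog.Post': object}
    assert [k for k in registry] == [k for k in registry.keys()]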
mongoengine/base/datastructures.py
@@ -35,10 +35,9 @@ class BaseDict(dict):
     _name = None

     def __init__(self, dict_items, instance, name):
-        Document = _import_class('Document')
-        EmbeddedDocument = _import_class('EmbeddedDocument')
+        BaseDocument = _import_class('BaseDocument')

-        if isinstance(instance, (Document, EmbeddedDocument)):
+        if isinstance(instance, BaseDocument):
             self._instance = weakref.proxy(instance)
         self._name = name
         super(BaseDict, self).__init__(dict_items)
@@ -56,11 +55,11 @@ class BaseDict(dict):
         EmbeddedDocument = _import_class('EmbeddedDocument')
         if isinstance(value, EmbeddedDocument) and value._instance is None:
             value._instance = self._instance
-        elif not isinstance(value, BaseDict) and isinstance(value, dict):
+        elif isinstance(value, dict) and not isinstance(value, BaseDict):
             value = BaseDict(value, None, '%s.%s' % (self._name, key))
             super(BaseDict, self).__setitem__(key, value)
             value._instance = self._instance
-        elif not isinstance(value, BaseList) and isinstance(value, list):
+        elif isinstance(value, list) and not isinstance(value, BaseList):
             value = BaseList(value, None, '%s.%s' % (self._name, key))
             super(BaseDict, self).__setitem__(key, value)
             value._instance = self._instance
@@ -100,10 +99,9 @@ class BaseList(list):
     _name = None

     def __init__(self, list_items, instance, name):
-        Document = _import_class('Document')
-        EmbeddedDocument = _import_class('EmbeddedDocument')
+        BaseDocument = _import_class('BaseDocument')

-        if isinstance(instance, (Document, EmbeddedDocument)):
+        if isinstance(instance, BaseDocument):
             self._instance = weakref.proxy(instance)
         self._name = name
         super(BaseList, self).__init__(list_items)
@@ -119,12 +117,12 @@ class BaseList(list):
         EmbeddedDocument = _import_class('EmbeddedDocument')
         if isinstance(value, EmbeddedDocument) and value._instance is None:
             value._instance = self._instance
-        elif not isinstance(value, BaseDict) and isinstance(value, dict):
+        elif isinstance(value, dict) and not isinstance(value, BaseDict):
             # Replace dict by BaseDict
             value = BaseDict(value, None, '%s.%s' % (self._name, key))
             super(BaseList, self).__setitem__(key, value)
             value._instance = self._instance
-        elif not isinstance(value, BaseList) and isinstance(value, list):
+        elif isinstance(value, list) and not isinstance(value, BaseList):
             # Replace list by BaseList
             value = BaseList(value, None, '%s.%s' % (self._name, key))
             super(BaseList, self).__setitem__(key, value)
@@ -218,6 +216,9 @@ class EmbeddedDocumentList(BaseList):
         Filters the list by only including embedded documents with the
         given keyword arguments.

+        This method only supports simple comparison (e.g: .filter(name='John Doe'))
+        and does not support operators like __gte, __lte, __icontains like queryset.filter does
+
         :param kwargs: The keyword arguments corresponding to the fields to
             filter on. *Multiple arguments are treated as if they are ANDed
             together.*
@@ -358,7 +359,7 @@ class EmbeddedDocumentList(BaseList):

 class StrictDict(object):
     __slots__ = ()
-    _special_fields = set(['get', 'pop', 'iteritems', 'items', 'keys', 'create'])
+    _special_fields = {'get', 'pop', 'iteritems', 'items', 'keys', 'create'}
     _classes = {}

     def __init__(self, **kwargs):
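
The `BaseDocument`-based isinstance check above is what lets nested containers hold a weakref back to any owning document and report mutations to it. A hedged sketch of the observable behavior — the `Post` model and the connection are assumptions for illustration only:

    from mongoengine import Document, ListField, StringField, connect

    class Post(Document):
        tags = ListField(StringField())

    connect('demo_db')                    # assumes a local mongod
    post = Post(tags=['a']).save()
    post.tags.append('b')                 # BaseList notifies its owner
    assert 'tags' in post._get_changed_fields()
    post.save()                           # delta update touches only 'tags'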
mongoengine/base/document.py
@@ -1,11 +1,8 @@
 import copy
 import numbers
-from collections import Hashable
 from functools import partial

-from bson import ObjectId, json_util
-from bson.dbref import DBRef
-from bson.son import SON
+from bson import DBRef, ObjectId, SON, json_util
 import pymongo
 import six
@@ -19,6 +16,7 @@ from mongoengine.base.fields import ComplexBaseField
 from mongoengine.common import _import_class
 from mongoengine.errors import (FieldDoesNotExist, InvalidDocumentError,
                                 LookUpError, OperationError, ValidationError)
+from mongoengine.python_support import Hashable

 __all__ = ('BaseDocument', 'NON_FIELD_ERRORS')

@@ -302,7 +300,7 @@ class BaseDocument(object):
             data['_cls'] = self._class_name

         # only root fields ['test1.a', 'test2'] => ['test1', 'test2']
-        root_fields = set([f.split('.')[0] for f in fields])
+        root_fields = {f.split('.')[0] for f in fields}

         for field_name in self:
             if root_fields and field_name not in root_fields:
@@ -404,7 +402,15 @@ class BaseDocument(object):

     @classmethod
     def from_json(cls, json_data, created=False):
-        """Converts json data to an unsaved document instance"""
+        """Converts json data to a Document instance
+
+        :param json_data: The json data to load into the Document
+        :param created: If True, the document will be considered as a brand new document
+                        If False and an id is provided, it will consider that the data being
+                        loaded corresponds to what's already in the database (This has an impact of subsequent call to .save())
+                        If False and no id is provided, it will consider the data as a new document
+                        (default ``False``)
+        """
         return cls._from_son(json_util.loads(json_data), created=created)

     def __expand_dynamic_values(self, name, value):
@@ -495,7 +501,13 @@ class BaseDocument(object):

         self._changed_fields = []

-    def _nestable_types_changed_fields(self, changed_fields, key, data, inspected):
+    def _nestable_types_changed_fields(self, changed_fields, base_key, data):
+        """Inspect nested data for changed fields
+
+        :param changed_fields: Previously collected changed fields
+        :param base_key: The base key that must be used to prepend changes to this data
+        :param data: data to inspect for changes
+        """
         # Loop list / dict fields as they contain documents
         # Determine the iterator to use
         if not hasattr(data, 'items'):
@@ -503,68 +515,60 @@ class BaseDocument(object):
         else:
             iterator = data.iteritems()

-        for index, value in iterator:
-            list_key = '%s%s.' % (key, index)
+        for index_or_key, value in iterator:
+            item_key = '%s%s.' % (base_key, index_or_key)
             # don't check anything lower if this key is already marked
             # as changed.
-            if list_key[:-1] in changed_fields:
+            if item_key[:-1] in changed_fields:
                 continue

             if hasattr(value, '_get_changed_fields'):
-                changed = value._get_changed_fields(inspected)
-                changed_fields += ['%s%s' % (list_key, k)
-                                   for k in changed if k]
+                changed = value._get_changed_fields()
+                changed_fields += ['%s%s' % (item_key, k) for k in changed if k]
             elif isinstance(value, (list, tuple, dict)):
                 self._nestable_types_changed_fields(
-                    changed_fields, list_key, value, inspected)
+                    changed_fields, item_key, value)

-    def _get_changed_fields(self, inspected=None):
+    def _get_changed_fields(self):
         """Return a list of all fields that have explicitly been changed.
         """
         EmbeddedDocument = _import_class('EmbeddedDocument')
         DynamicEmbeddedDocument = _import_class('DynamicEmbeddedDocument')
         ReferenceField = _import_class('ReferenceField')
+        GenericReferenceField = _import_class('GenericReferenceField')
         SortedListField = _import_class('SortedListField')

         changed_fields = []
         changed_fields += getattr(self, '_changed_fields', [])

-        inspected = inspected or set()
-        if hasattr(self, 'id') and isinstance(self.id, Hashable):
-            if self.id in inspected:
-                return changed_fields
-            inspected.add(self.id)
-
         for field_name in self._fields_ordered:
             db_field_name = self._db_field_map.get(field_name, field_name)
             key = '%s.' % db_field_name
             data = self._data.get(field_name, None)
             field = self._fields.get(field_name)

-            if hasattr(data, 'id'):
-                if data.id in inspected:
-                    continue
-            if isinstance(field, ReferenceField):
+            if db_field_name in changed_fields:
+                # Whole field already marked as changed, no need to go further
                 continue
-            elif (
-                isinstance(data, (EmbeddedDocument, DynamicEmbeddedDocument)) and
-                db_field_name not in changed_fields
-            ):
+
+            if isinstance(field, ReferenceField):  # Don't follow referenced documents
+                continue
+
+            if isinstance(data, EmbeddedDocument):
                 # Find all embedded fields that have been changed
-                changed = data._get_changed_fields(inspected)
+                changed = data._get_changed_fields()
                 changed_fields += ['%s%s' % (key, k) for k in changed if k]
-            elif (isinstance(data, (list, tuple, dict)) and
-                    db_field_name not in changed_fields):
+            elif isinstance(data, (list, tuple, dict)):
                 if (hasattr(field, 'field') and
-                        isinstance(field.field, ReferenceField)):
+                        isinstance(field.field, (ReferenceField, GenericReferenceField))):
                     continue
                 elif isinstance(field, SortedListField) and field._ordering:
                     # if ordering is affected whole list is changed
-                    if any(map(lambda d: field._ordering in d._changed_fields, data)):
+                    if any(field._ordering in d._changed_fields for d in data):
                         changed_fields.append(db_field_name)
                         continue

                 self._nestable_types_changed_fields(
-                    changed_fields, key, data, inspected)
+                    changed_fields, key, data)
         return changed_fields

     def _delta(self):
@@ -576,7 +580,6 @@ class BaseDocument(object):

         set_fields = self._get_changed_fields()
         unset_data = {}
-        parts = []
         if hasattr(self, '_changed_fields'):
             set_data = {}
             # Fetch each set item from its path
@@ -586,15 +589,13 @@ class BaseDocument(object):
             new_path = []
             for p in parts:
                 if isinstance(d, (ObjectId, DBRef)):
+                    # Don't dig in the references
                     break
-                elif isinstance(d, list) and p.lstrip('-').isdigit():
-                    if p[0] == '-':
-                        p = str(len(d) + int(p))
-                    try:
-                        d = d[int(p)]
-                    except IndexError:
-                        d = None
+                elif isinstance(d, list) and p.isdigit():
+                    # An item of a list (identified by its index) is updated
+                    d = d[int(p)]
                 elif hasattr(d, 'get'):
+                    # dict-like (dict, embedded document)
                     d = d.get(p)
                 new_path.append(p)
             path = '.'.join(new_path)
@@ -606,26 +607,26 @@ class BaseDocument(object):

         # Determine if any changed items were actually unset.
         for path, value in set_data.items():
-            if value or isinstance(value, (numbers.Number, bool)):
+            if value or isinstance(value, (numbers.Number, bool)):  # Account for 0 and True that are truthy
                 continue

-            # If we've set a value that ain't the default value don't unset it.
-            default = None
+            parts = path.split('.')
+
             if (self._dynamic and len(parts) and parts[0] in
                     self._dynamic_fields):
                 del set_data[path]
                 unset_data[path] = 1
                 continue
-            elif path in self._fields:
+
+            # If we've set a value that ain't the default value don't unset it.
+            default = None
+            if path in self._fields:
                 default = self._fields[path].default
             else:  # Perform a full lookup for lists / embedded lookups
                 d = self
-                parts = path.split('.')
                 db_field_name = parts.pop()
                 for p in parts:
-                    if isinstance(d, list) and p.lstrip('-').isdigit():
-                        if p[0] == '-':
-                            p = str(len(d) + int(p))
+                    if isinstance(d, list) and p.isdigit():
                         d = d[int(p)]
                     elif (hasattr(d, '__getattribute__') and
                           not isinstance(d, dict)):
@@ -643,10 +644,9 @@ class BaseDocument(object):
                 default = None

             if default is not None:
-                if callable(default):
-                    default = default()
+                default = default() if callable(default) else default

-            if default != value:
+            if value != default:
                 continue

             del set_data[path]
@@ -692,7 +692,7 @@ class BaseDocument(object):

         fields = cls._fields
         if not _auto_dereference:
-            fields = copy.copy(fields)
+            fields = copy.deepcopy(fields)

         for field_name, field in fields.iteritems():
             field._auto_dereference = _auto_dereference
@@ -1083,6 +1083,6 @@ class BaseDocument(object):
             sep = getattr(field, 'display_sep', ' ')
             values = value if field.__class__.__name__ in ('ListField', 'SortedListField') else [value]
             return sep.join([
-                dict(field.choices).get(val, val)
+                six.text_type(dict(field.choices).get(val, val))
                 for val in values or []])
         return value
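
To make the reworked change tracking concrete, a rough sketch of what `_get_changed_fields` and `_delta` cooperate to produce (private APIs, shown only for illustration; `post` continues the hypothetical example above):

    post = Post(tags=['a', 'b']).save()
    post.tags[0] = 'x'                    # BaseList marks the path 'tags.0'
    sets, unsets = post._delta()
    # expected shape: sets == {'tags.0': 'x'}, unsets == {}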
mongoengine/base/fields.py
@@ -55,7 +55,7 @@ class BaseField(object):
             field. Generally this is deprecated in favour of the
             `FIELD.validate` method
         :param choices: (optional) The valid choices
-        :param null: (optional) Is the field value can be null. If no and there is a default value
+        :param null: (optional) If the field value can be null. If no and there is a default value
             then the default value is set
         :param sparse: (optional) `sparse=True` combined with `unique=True` and `required=False`
             means that uniqueness won't be enforced for `None` values
@@ -130,7 +130,6 @@ class BaseField(object):
     def __set__(self, instance, value):
         """Descriptor for assigning a value to a field in a document.
         """
-
         # If setting to None and there is a default
         # Then set the value to the default value
         if value is None:
@@ -267,13 +266,15 @@ class ComplexBaseField(BaseField):
         ReferenceField = _import_class('ReferenceField')
         GenericReferenceField = _import_class('GenericReferenceField')
         EmbeddedDocumentListField = _import_class('EmbeddedDocumentListField')
-        dereference = (self._auto_dereference and
+
+        auto_dereference = instance._fields[self.name]._auto_dereference
+
+        dereference = (auto_dereference and
                        (self.field is None or isinstance(self.field,
                                                          (GenericReferenceField, ReferenceField))))

         _dereference = _import_class('DeReference')()

-        self._auto_dereference = instance._fields[self.name]._auto_dereference
         if instance._initialised and dereference and instance._data.get(self.name):
             instance._data[self.name] = _dereference(
                 instance._data.get(self.name), max_depth=1, instance=instance,
@@ -294,7 +295,7 @@ class ComplexBaseField(BaseField):
             value = BaseDict(value, instance, self.name)
             instance._data[self.name] = value

-        if (self._auto_dereference and instance._initialised and
+        if (auto_dereference and instance._initialised and
                 isinstance(value, (BaseList, BaseDict)) and
                 not value._dereferenced):
             value = _dereference(
@@ -313,11 +314,16 @@ class ComplexBaseField(BaseField):
         if hasattr(value, 'to_python'):
             return value.to_python()

+        BaseDocument = _import_class('BaseDocument')
+        if isinstance(value, BaseDocument):
+            # Something is wrong, return the value as it is
+            return value
+
         is_list = False
         if not hasattr(value, 'items'):
             try:
                 is_list = True
-                value = {k: v for k, v in enumerate(value)}
+                value = {idx: v for idx, v in enumerate(value)}
             except TypeError:  # Not iterable return the value
                 return value
@@ -502,7 +508,7 @@ class GeoJsonBaseField(BaseField):
     def validate(self, value):
         """Validate the GeoJson object based on its type."""
         if isinstance(value, dict):
-            if set(value.keys()) == set(['type', 'coordinates']):
+            if set(value.keys()) == {'type', 'coordinates'}:
                 if value['type'] != self._type:
                     self.error('%s type must be "%s"' %
                                (self._name, self._type))
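
A recurring fix in the field hunks above replaces writes to `self._auto_dereference` inside `__get__` with a local variable. The bug pattern, reduced to plain Python: a field object is a descriptor shared by every instance of its owner class, so stashing per-access state on `self` leaks between documents. A minimal sketch:

    class SharedDescriptor(object):
        def __get__(self, instance, owner):
            # Bad: self is one object shared by all instances of owner.
            # self.flag = instance.flag
            flag = instance.flag          # Good: keep per-access state local
            return 'dereferenced' if flag else 'raw'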
mongoengine/base/metaclasses.py
@@ -18,14 +18,14 @@ class DocumentMetaclass(type):
     """Metaclass for all documents."""

     # TODO lower complexity of this method
-    def __new__(cls, name, bases, attrs):
-        flattened_bases = cls._get_bases(bases)
-        super_new = super(DocumentMetaclass, cls).__new__
+    def __new__(mcs, name, bases, attrs):
+        flattened_bases = mcs._get_bases(bases)
+        super_new = super(DocumentMetaclass, mcs).__new__

         # If a base class just call super
         metaclass = attrs.get('my_metaclass')
         if metaclass and issubclass(metaclass, DocumentMetaclass):
-            return super_new(cls, name, bases, attrs)
+            return super_new(mcs, name, bases, attrs)

         attrs['_is_document'] = attrs.get('_is_document', False)
         attrs['_cached_reference_fields'] = []
@@ -121,7 +121,8 @@ class DocumentMetaclass(type):
                     # inheritance of classes where inheritance is set to False
                     allow_inheritance = base._meta.get('allow_inheritance')
                     if not allow_inheritance and not base._meta.get('abstract'):
-                        raise ValueError('Document %s may not be subclassed' %
+                        raise ValueError('Document %s may not be subclassed. '
+                                         'To enable inheritance, use the "allow_inheritance" meta attribute.' %
                                          base.__name__)

             # Get superclasses from last base superclass
@@ -138,7 +139,7 @@ class DocumentMetaclass(type):
             attrs['_types'] = attrs['_subclasses']  # TODO depreciate _types

         # Create the new_class
-        new_class = super_new(cls, name, bases, attrs)
+        new_class = super_new(mcs, name, bases, attrs)

         # Set _subclasses
         for base in document_bases:
@@ -147,7 +148,7 @@ class DocumentMetaclass(type):
             base._types = base._subclasses  # TODO depreciate _types

         (Document, EmbeddedDocument, DictField,
-         CachedReferenceField) = cls._import_classes()
+         CachedReferenceField) = mcs._import_classes()

         if issubclass(new_class, Document):
             new_class._collection = None
@@ -219,29 +220,26 @@ class DocumentMetaclass(type):

         return new_class

-    def add_to_class(self, name, value):
-        setattr(self, name, value)
-
     @classmethod
-    def _get_bases(cls, bases):
+    def _get_bases(mcs, bases):
         if isinstance(bases, BasesTuple):
             return bases
         seen = []
-        bases = cls.__get_bases(bases)
+        bases = mcs.__get_bases(bases)
         unique_bases = (b for b in bases if not (b in seen or seen.append(b)))
         return BasesTuple(unique_bases)

     @classmethod
-    def __get_bases(cls, bases):
+    def __get_bases(mcs, bases):
         for base in bases:
             if base is object:
                 continue
             yield base
-            for child_base in cls.__get_bases(base.__bases__):
+            for child_base in mcs.__get_bases(base.__bases__):
                 yield child_base

     @classmethod
-    def _import_classes(cls):
+    def _import_classes(mcs):
         Document = _import_class('Document')
         EmbeddedDocument = _import_class('EmbeddedDocument')
         DictField = _import_class('DictField')
@@ -254,9 +252,9 @@ class TopLevelDocumentMetaclass(DocumentMetaclass):
     collection in the database.
     """

-    def __new__(cls, name, bases, attrs):
-        flattened_bases = cls._get_bases(bases)
-        super_new = super(TopLevelDocumentMetaclass, cls).__new__
+    def __new__(mcs, name, bases, attrs):
+        flattened_bases = mcs._get_bases(bases)
+        super_new = super(TopLevelDocumentMetaclass, mcs).__new__

         # Set default _meta data if base class, otherwise get user defined meta
         if attrs.get('my_metaclass') == TopLevelDocumentMetaclass:
@@ -319,7 +317,7 @@ class TopLevelDocumentMetaclass(DocumentMetaclass):
                     not parent_doc_cls._meta.get('abstract', False)):
                 msg = 'Abstract document cannot have non-abstract base'
                 raise ValueError(msg)
-            return super_new(cls, name, bases, attrs)
+            return super_new(mcs, name, bases, attrs)

         # Merge base class metas.
         # Uses a special MetaDict that handles various merging rules
@@ -360,7 +358,7 @@ class TopLevelDocumentMetaclass(DocumentMetaclass):
         attrs['_meta'] = meta

         # Call super and get the new class
-        new_class = super_new(cls, name, bases, attrs)
+        new_class = super_new(mcs, name, bases, attrs)

         meta = new_class._meta
@@ -394,7 +392,7 @@ class TopLevelDocumentMetaclass(DocumentMetaclass):
                 '_auto_id_field', False)
             if not new_class._meta.get('id_field'):
                 # After 0.10, find not existing names, instead of overwriting
-                id_name, id_db_name = cls.get_auto_id_names(new_class)
+                id_name, id_db_name = mcs.get_auto_id_names(new_class)
                 new_class._auto_id_field = True
                 new_class._meta['id_field'] = id_name
                 new_class._fields[id_name] = ObjectIdField(db_field=id_db_name)
@@ -419,7 +417,7 @@ class TopLevelDocumentMetaclass(DocumentMetaclass):
         return new_class

     @classmethod
-    def get_auto_id_names(cls, new_class):
+    def get_auto_id_names(mcs, new_class):
         id_name, id_db_name = ('id', '_id')
         if id_name not in new_class._fields and \
                 id_db_name not in (v.db_field for v in new_class._fields.values()):
mongoengine/base/utils.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+import re
+
+
+class LazyRegexCompiler(object):
+    """Descriptor to allow lazy compilation of regex"""
+
+    def __init__(self, pattern, flags=0):
+        self._pattern = pattern
+        self._flags = flags
+        self._compiled_regex = None
+
+    @property
+    def compiled_regex(self):
+        if self._compiled_regex is None:
+            self._compiled_regex = re.compile(self._pattern, self._flags)
+        return self._compiled_regex
+
+    def __get__(self, instance, owner):
+        return self.compiled_regex
+
+    def __set__(self, instance, value):
+        raise AttributeError("Can not set attribute LazyRegexCompiler")
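
A sketch of how the new descriptor is consumed (mirroring the URLField/EmailField hunks later in this diff; the `Validator` class here is hypothetical):

    import re
    from mongoengine.base.utils import LazyRegexCompiler

    class Validator(object):
        REGEX = LazyRegexCompiler(r'^https?://', re.IGNORECASE)

    Validator.REGEX.match('https://example.com')  # re.compile runs here, once
    Validator.REGEX.match('http://other.org')     # cached pattern is reused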
mongoengine/connection.py
@@ -104,6 +104,18 @@ def register_connection(alias, db=None, name=None, host=None, port=None,
                 conn_settings['authentication_source'] = uri_options['authsource']
             if 'authmechanism' in uri_options:
                 conn_settings['authentication_mechanism'] = uri_options['authmechanism']
+            if IS_PYMONGO_3 and 'readpreference' in uri_options:
+                read_preferences = (
+                    ReadPreference.NEAREST,
+                    ReadPreference.PRIMARY,
+                    ReadPreference.PRIMARY_PREFERRED,
+                    ReadPreference.SECONDARY,
+                    ReadPreference.SECONDARY_PREFERRED)
+                read_pf_mode = uri_options['readpreference'].lower()
+                for preference in read_preferences:
+                    if preference.name.lower() == read_pf_mode:
+                        conn_settings['read_preference'] = preference
+                        break
         else:
             resolved_hosts.append(entity)
     conn_settings['host'] = resolved_hosts
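
With the hunk above, a read preference embedded in a MongoDB URI is propagated into the connection settings (PyMongo 3 only). An assumed usage sketch:

    import mongoengine

    # The readPreference URI option is now honored (assumed usage):
    mongoengine.connect(
        host='mongodb://localhost:27017/mydb?readPreference=secondaryPreferred')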
mongoengine/context_managers.py
@@ -145,66 +145,85 @@ class no_sub_classes(object):
         :param cls: the class to turn querying sub classes on
         """
         self.cls = cls
+        self.cls_initial_subclasses = None

     def __enter__(self):
         """Change the objects default and _auto_dereference values."""
-        self.cls._all_subclasses = self.cls._subclasses
-        self.cls._subclasses = (self.cls,)
+        self.cls_initial_subclasses = self.cls._subclasses
+        self.cls._subclasses = (self.cls._class_name,)
         return self.cls

     def __exit__(self, t, value, traceback):
         """Reset the default and _auto_dereference values."""
-        self.cls._subclasses = self.cls._all_subclasses
-        delattr(self.cls, '_all_subclasses')
-        return self.cls
+        self.cls._subclasses = self.cls_initial_subclasses


 class query_counter(object):
-    """Query_counter context manager to get the number of queries."""
+    """Query_counter context manager to get the number of queries.
+    This works by updating the `profiling_level` of the database so that all queries get logged,
+    resetting the db.system.profile collection at the beginnig of the context and counting the new entries.
+
+    This was designed for debugging purpose. In fact it is a global counter so queries issued by other threads/processes
+    can interfere with it
+
+    Be aware that:
+    - Iterating over large amount of documents (>101) makes pymongo issue `getmore` queries to fetch the next batch of
+      documents (https://docs.mongodb.com/manual/tutorial/iterate-a-cursor/#cursor-batches)
+    - Some queries are ignored by default by the counter (killcursors, db.system.indexes)
+    """

     def __init__(self):
-        """Construct the query_counter."""
-        self.counter = 0
+        """Construct the query_counter
+        """
         self.db = get_db()
+        self.initial_profiling_level = None
+        self._ctx_query_counter = 0  # number of queries issued by the context

-    def __enter__(self):
-        """On every with block we need to drop the profile collection."""
+        self._ignored_query = {
+            'ns':
+                {'$ne': '%s.system.indexes' % self.db.name},
+            'op':  # MONGODB < 3.2
+                {'$ne': 'killcursors'},
+            'command.killCursors':  # MONGODB >= 3.2
+                {'$exists': False}
+        }
+
+    def _turn_on_profiling(self):
+        self.initial_profiling_level = self.db.profiling_level()
         self.db.set_profiling_level(0)
         self.db.system.profile.drop()
         self.db.set_profiling_level(2)
+
+    def _resets_profiling(self):
+        self.db.set_profiling_level(self.initial_profiling_level)
+
+    def __enter__(self):
+        self._turn_on_profiling()
         return self

     def __exit__(self, t, value, traceback):
-        """Reset the profiling level."""
-        self.db.set_profiling_level(0)
+        self._resets_profiling()

     def __eq__(self, value):
         """== Compare querycounter."""
         counter = self._get_count()
         return value == counter

     def __ne__(self, value):
         """!= Compare querycounter."""
         return not self.__eq__(value)

     def __lt__(self, value):
         """< Compare querycounter."""
         return self._get_count() < value

     def __le__(self, value):
         """<= Compare querycounter."""
         return self._get_count() <= value

     def __gt__(self, value):
         """> Compare querycounter."""
         return self._get_count() > value

     def __ge__(self, value):
         """>= Compare querycounter."""
         return self._get_count() >= value

     def __int__(self):
         """int representation."""
         return self._get_count()

     def __repr__(self):
@@ -212,10 +231,12 @@ class query_counter(object):
         return u"%s" % self._get_count()

     def _get_count(self):
-        """Get the number of queries."""
-        ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}}
-        count = self.db.system.profile.find(ignore_query).count() - self.counter
-        self.counter += 1
+        """Get the number of queries by counting the current number of entries in db.system.profile
+        and substracting the queries issued by this context. In fact everytime this is called, 1 query is
+        issued so we need to balance that
+        """
+        count = self.db.system.profile.find(self._ignored_query).count() - self._ctx_query_counter
+        self._ctx_query_counter += 1  # Account for the query we just issued to gather the information
         return count
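
A short usage sketch of the reworked context manager — the connection and the `Post` model are assumptions, and the caveats from the docstring (getmore batches, other clients) apply:

    from mongoengine.context_managers import query_counter

    with query_counter() as q:
        assert q == 0
        Post.objects.first()    # issues one query against the profiled db
        assert q == 1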
mongoengine/dereference.py
@@ -52,26 +52,40 @@ class DeReference(object):
                 [i.__class__ == doc_type for i in items.values()]):
             return items
         elif not field.dbref:
+            # We must turn the ObjectIds into DBRefs
+
+            # Recursively dig into the sub items of a list/dict
+            # to turn the ObjectIds into DBRefs
+            def _get_items_from_list(items):
+                new_items = []
+                for v in items:
+                    value = v
+                    if isinstance(v, dict):
+                        value = _get_items_from_dict(v)
+                    elif isinstance(v, list):
+                        value = _get_items_from_list(v)
+                    elif not isinstance(v, (DBRef, Document)):
+                        value = field.to_python(v)
+                    new_items.append(value)
+                return new_items
+
+            def _get_items_from_dict(items):
+                new_items = {}
+                for k, v in items.iteritems():
+                    value = v
+                    if isinstance(v, list):
+                        value = _get_items_from_list(v)
+                    elif isinstance(v, dict):
+                        value = _get_items_from_dict(v)
+                    elif not isinstance(v, (DBRef, Document)):
+                        value = field.to_python(v)
+                    new_items[k] = value
+                return new_items
+
             if not hasattr(items, 'items'):
-
-                def _get_items(items):
-                    new_items = []
-                    for v in items:
-                        if isinstance(v, list):
-                            new_items.append(_get_items(v))
-                        elif not isinstance(v, (DBRef, Document)):
-                            new_items.append(field.to_python(v))
-                        else:
-                            new_items.append(v)
-                    return new_items
-
-                items = _get_items(items)
+                items = _get_items_from_list(items)
             else:
-                items = {
-                    k: (v if isinstance(v, (DBRef, Document))
-                        else field.to_python(v))
-                    for k, v in items.iteritems()
-                }
+                items = _get_items_from_dict(items)

         self.reference_map = self._find_references(items)
         self.object_map = self._fetch_objects(doc_type=doc_type)
@@ -133,7 +147,12 @@ class DeReference(object):
         """
         object_map = {}
         for collection, dbrefs in self.reference_map.iteritems():
-            if hasattr(collection, 'objects'):  # We have a document class for the refs
+
+            # we use getattr instead of hasattr because hasattr swallows any exception under python2
+            # so it could hide nasty things without raising exceptions (cfr bug #1688))
+            ref_document_cls_exists = (getattr(collection, 'objects', None) is not None)
+
+            if ref_document_cls_exists:
                 col_name = collection._get_collection_name()
                 refs = [dbref for dbref in dbrefs
                         if (col_name, dbref) not in object_map]
@@ -141,7 +160,7 @@ class DeReference(object):
                 for key, doc in references.iteritems():
                     object_map[(col_name, key)] = doc
             else:  # Generic reference: use the refs data to convert to document
-                if isinstance(doc_type, (ListField, DictField, MapField,)):
+                if isinstance(doc_type, (ListField, DictField, MapField)):
                     continue

                 refs = [dbref for dbref in dbrefs
mongoengine/document.py
@@ -12,7 +12,9 @@ from mongoengine.base import (BaseDict, BaseDocument, BaseList,
                               TopLevelDocumentMetaclass, get_document)
 from mongoengine.common import _import_class
 from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
-from mongoengine.context_managers import switch_collection, switch_db
+from mongoengine.context_managers import (set_write_concern,
+                                          switch_collection,
+                                          switch_db)
 from mongoengine.errors import (InvalidDocumentError, InvalidQueryError,
                                 SaveConditionError)
 from mongoengine.python_support import IS_PYMONGO_3
@@ -39,7 +41,7 @@ class InvalidCollectionError(Exception):
     pass


-class EmbeddedDocument(BaseDocument):
+class EmbeddedDocument(six.with_metaclass(DocumentMetaclass, BaseDocument)):
     """A :class:`~mongoengine.Document` that isn't stored in its own
     collection. :class:`~mongoengine.EmbeddedDocument`\ s should be used as
     fields on :class:`~mongoengine.Document`\ s through the
@@ -58,7 +60,6 @@ class EmbeddedDocument(BaseDocument):
     # The __metaclass__ attribute is removed by 2to3 when running with Python3
     # my_metaclass is defined so that metaclass can be queried in Python 2 & 3
     my_metaclass = DocumentMetaclass
-    __metaclass__ = DocumentMetaclass

     # A generic embedded document doesn't have any immutable properties
     # that describe it uniquely, hence it shouldn't be hashable. You can
@@ -95,7 +96,7 @@ class EmbeddedDocument(BaseDocument):
             self._instance.reload(*args, **kwargs)


-class Document(BaseDocument):
+class Document(six.with_metaclass(TopLevelDocumentMetaclass, BaseDocument)):
     """The base class used for defining the structure and properties of
     collections of documents stored in MongoDB. Inherit from this class, and
     add fields as class attributes to define a document's structure.
@@ -150,7 +151,6 @@ class Document(BaseDocument):
     # The __metaclass__ attribute is removed by 2to3 when running with Python3
     # my_metaclass is defined so that metaclass can be queried in Python 2 & 3
     my_metaclass = TopLevelDocumentMetaclass
-    __metaclass__ = TopLevelDocumentMetaclass

     __slots__ = ('__objects',)
@@ -172,8 +172,8 @@ class Document(BaseDocument):
         """
         if self.pk is None:
             return super(BaseDocument, self).__hash__()
-        else:
-            return hash(self.pk)
+
+        return hash(self.pk)

     @classmethod
     def _get_db(cls):
@@ -370,6 +370,8 @@ class Document(BaseDocument):

         signals.pre_save_post_validation.send(self.__class__, document=self,
                                               created=created, **signal_kwargs)
+        # it might be refreshed by the pre_save_post_validation hook, e.g., for etag generation
+        doc = self.to_mongo()

         if self._meta.get('auto_create_index', True):
             self.ensure_indexes()
@@ -429,11 +431,18 @@ class Document(BaseDocument):
         Helper method, should only be used inside save().
         """
         collection = self._get_collection()
-        if force_insert:
-            return collection.insert(doc, **write_concern)
-
-        object_id = collection.save(doc, **write_concern)
+        with set_write_concern(collection, write_concern) as wc_collection:
+            if force_insert:
+                return wc_collection.insert_one(doc).inserted_id
+            # insert_one will provoke UniqueError alongside save does not
+            # therefore, it need to catch and call replace_one.
+            if '_id' in doc:
+                raw_object = wc_collection.find_one_and_replace(
+                    {'_id': doc['_id']}, doc)
+                if raw_object:
+                    return doc['_id']
+
+            object_id = wc_collection.insert_one(doc).inserted_id

         # In PyMongo 3.0, the save() call calls internally the _update() call
         # but they forget to return the _id value passed back, therefore getting it back here
@@ -585,9 +594,8 @@ class Document(BaseDocument):
         :param signal_kwargs: (optional) kwargs dictionary to be passed to
             the signal calls.
         :param write_concern: Extra keyword arguments are passed down which
-            will be used as options for the resultant
-            ``getLastError`` command.  For example,
-            ``save(..., write_concern={w: 2, fsync: True}, ...)`` will
+            will be used as options for the resultant ``getLastError`` command.
+            For example, ``save(..., w: 2, fsync: True)`` will
             wait until at least two servers have recorded the write and
             will force an fsync on the primary server.
@@ -997,7 +1005,7 @@ class Document(BaseDocument):
         return {'missing': missing, 'extra': extra}


-class DynamicDocument(Document):
+class DynamicDocument(six.with_metaclass(TopLevelDocumentMetaclass, Document)):
     """A Dynamic Document class allowing flexible, expandable and uncontrolled
     schemas. As a :class:`~mongoengine.Document` subclass, acts in the same
     way as an ordinary document but has expanded style properties. Any data
@@ -1014,7 +1022,6 @@ class DynamicDocument(Document):
     # The __metaclass__ attribute is removed by 2to3 when running with Python3
     # my_metaclass is defined so that metaclass can be queried in Python 2 & 3
     my_metaclass = TopLevelDocumentMetaclass
-    __metaclass__ = TopLevelDocumentMetaclass

     _dynamic = True
@@ -1030,7 +1037,7 @@ class DynamicDocument(Document):
         super(DynamicDocument, self).__delattr__(*args, **kwargs)


-class DynamicEmbeddedDocument(EmbeddedDocument):
+class DynamicEmbeddedDocument(six.with_metaclass(DocumentMetaclass, EmbeddedDocument)):
     """A Dynamic Embedded Document class allowing flexible, expandable and
     uncontrolled schemas. See :class:`~mongoengine.DynamicDocument` for more
     information about dynamic documents.
@@ -1039,7 +1046,6 @@ class DynamicEmbeddedDocument(EmbeddedDocument):
     # The __metaclass__ attribute is removed by 2to3 when running with Python3
     # my_metaclass is defined so that metaclass can be queried in Python 2 & 3
     my_metaclass = DocumentMetaclass
-    __metaclass__ = DocumentMetaclass

     _dynamic = True
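
Every class-declaration hunk in this file applies the same Python 2/3 metaclass compatibility idiom; in isolation it looks like this:

    import six

    class Meta(type):
        pass

    class Base(object):
        pass

    class Concrete(six.with_metaclass(Meta, Base)):
        pass

    assert type(Concrete) is Meta    # metaclass applied on Py2 and Py3 alike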
mongoengine/errors.py
@@ -71,6 +71,7 @@ class ValidationError(AssertionError):
     _message = None

     def __init__(self, message='', **kwargs):
+        super(ValidationError, self).__init__(message)
         self.errors = kwargs.get('errors', {})
         self.field_name = kwargs.get('field_name')
         self.message = message
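
The added `super().__init__(message)` call makes the exception carry its message in `args` like any stock `AssertionError`, which matters for pickling and generic logging. A minimal expectation sketch:

    from mongoengine.errors import ValidationError

    err = ValidationError('bad value', field_name='age')
    assert err.args == ('bad value',)    # was () before the super() call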
mongoengine/fields.py
@@ -5,7 +5,6 @@ import re
 import socket
 import time
 import uuid
 import warnings
-from operator import itemgetter

 from bson import Binary, DBRef, ObjectId, SON
@@ -25,15 +24,18 @@ try:
 except ImportError:
     Int64 = long


 from mongoengine.base import (BaseDocument, BaseField, ComplexBaseField,
                               GeoJsonBaseField, LazyReference, ObjectIdField,
                               get_document)
+from mongoengine.base.utils import LazyRegexCompiler
 from mongoengine.common import _import_class
 from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
 from mongoengine.document import Document, EmbeddedDocument
 from mongoengine.errors import DoesNotExist, InvalidQueryError, ValidationError
 from mongoengine.python_support import StringIO
-from mongoengine.queryset import DO_NOTHING, QuerySet
+from mongoengine.queryset import DO_NOTHING
+from mongoengine.queryset.base import BaseQuerySet

 try:
     from PIL import Image, ImageOps
@@ -41,6 +43,12 @@ except ImportError:
     Image = None
     ImageOps = None

+if six.PY3:
+    # Useless as long as 2to3 gets executed
+    # as it turns `long` into `int` blindly
+    long = int
+
+
 __all__ = (
     'StringField', 'URLField', 'EmailField', 'IntField', 'LongField',
     'FloatField', 'DecimalField', 'BooleanField', 'DateTimeField', 'DateField',
@@ -123,9 +131,9 @@ class URLField(StringField):
     .. versionadded:: 0.3
     """

-    _URL_REGEX = re.compile(
+    _URL_REGEX = LazyRegexCompiler(
         r'^(?:[a-z0-9\.\-]*)://'  # scheme is validated separately
-        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'  # domain...
+        r'(?:(?:[A-Z0-9](?:[A-Z0-9-_]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}(?<!-)\.?)|'  # domain...
         r'localhost|'  # localhost...
         r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
         r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
@@ -133,8 +141,7 @@ class URLField(StringField):
         r'(?:/?|[/?]\S+)$', re.IGNORECASE)
     _URL_SCHEMES = ['http', 'https', 'ftp', 'ftps']

-    def __init__(self, verify_exists=False, url_regex=None, schemes=None, **kwargs):
-        self.verify_exists = verify_exists
+    def __init__(self, url_regex=None, schemes=None, **kwargs):
         self.url_regex = url_regex or self._URL_REGEX
         self.schemes = schemes or self._URL_SCHEMES
         super(URLField, self).__init__(**kwargs)
@@ -157,7 +164,7 @@ class EmailField(StringField):

     .. versionadded:: 0.4
     """
-    USER_REGEX = re.compile(
+    USER_REGEX = LazyRegexCompiler(
         # `dot-atom` defined in RFC 5322 Section 3.2.3.
         r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z"
         # `quoted-string` defined in RFC 5322 Section 3.2.4.
@@ -165,7 +172,7 @@ class EmailField(StringField):
         re.IGNORECASE
     )

-    UTF8_USER_REGEX = re.compile(
+    UTF8_USER_REGEX = LazyRegexCompiler(
         six.u(
             # RFC 6531 Section 3.3 extends `atext` (used by dot-atom) to
             # include `UTF8-non-ascii`.
@@ -175,7 +182,7 @@ class EmailField(StringField):
         ), re.IGNORECASE | re.UNICODE
     )

-    DOMAIN_REGEX = re.compile(
+    DOMAIN_REGEX = LazyRegexCompiler(
         r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z',
         re.IGNORECASE
     )
@@ -267,14 +274,14 @@ class IntField(BaseField):
     def to_python(self, value):
         try:
             value = int(value)
-        except ValueError:
+        except (TypeError, ValueError):
             pass
         return value

     def validate(self, value):
         try:
             value = int(value)
-        except Exception:
+        except (TypeError, ValueError):
             self.error('%s could not be converted to int' % value)

         if self.min_value is not None and value < self.min_value:
@@ -300,7 +307,7 @@ class LongField(BaseField):
     def to_python(self, value):
         try:
             value = long(value)
-        except ValueError:
+        except (TypeError, ValueError):
             pass
         return value
@@ -310,7 +317,7 @@ class LongField(BaseField):
     def validate(self, value):
         try:
             value = long(value)
-        except Exception:
+        except (TypeError, ValueError):
             self.error('%s could not be converted to long' % value)

         if self.min_value is not None and value < self.min_value:
@@ -364,7 +371,8 @@ class FloatField(BaseField):


 class DecimalField(BaseField):
-    """Fixed-point decimal number field.
+    """Fixed-point decimal number field. Stores the value as a float by default unless `force_string` is used.
+    If using floats, beware of Decimal to float conversion (potential precision loss)

     .. versionchanged:: 0.8
     .. versionadded:: 0.3
@@ -375,7 +383,9 @@ class DecimalField(BaseField):
         """
         :param min_value: Validation rule for the minimum acceptable value.
         :param max_value: Validation rule for the maximum acceptable value.
-        :param force_string: Store as a string.
+        :param force_string: Store the value as a string (instead of a float).
+            Be aware that this affects query sorting and operation like lte, gte (as string comparison is applied)
+            and some query operator won't work (e.g: inc, dec)
         :param precision: Number of decimal places to store.
         :param rounding: The rounding rule from the python decimal library:
@@ -406,7 +416,7 @@ class DecimalField(BaseField):
         # Convert to string for python 2.6 before casting to Decimal
         try:
             value = decimal.Decimal('%s' % value)
-        except decimal.InvalidOperation:
+        except (TypeError, ValueError, decimal.InvalidOperation):
             return value
         return value.quantize(decimal.Decimal('.%s' % ('0' * self.precision)), rounding=self.rounding)
@@ -423,7 +433,7 @@ class DecimalField(BaseField):
             value = six.text_type(value)
         try:
             value = decimal.Decimal(value)
-        except Exception as exc:
+        except (TypeError, ValueError, decimal.InvalidOperation) as exc:
             self.error('Could not convert value to decimal: %s' % exc)

         if self.min_value is not None and value < self.min_value:
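
The `force_string` caveat documented above can be made concrete: values stored as strings compare lexicographically in MongoDB. A hedged sketch with a hypothetical model:

    from mongoengine import Document, DecimalField

    class Product(Document):
        price = DecimalField(force_string=True)

    # Stored as strings, '9.99' sorts after '10.00', so range queries such
    # as Product.objects(price__gte=10) can behave unexpectedly, and
    # operators like inc/dec cannot be applied to a string value.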
@@ -462,6 +472,8 @@ class DateTimeField(BaseField):
     installed you can utilise it to convert varying types of date formats into valid
     python datetime objects.

+    Note: To default the field to the current datetime, use: DateTimeField(default=datetime.utcnow)
+
     Note: Microseconds are rounded to the nearest millisecond.
         Pre UTC microsecond support is effectively broken.
         Use :class:`~mongoengine.fields.ComplexDateTimeField` if you
@@ -557,11 +569,15 @@ class ComplexDateTimeField(StringField):
     The `,` as the separator can be easily modified by passing the `separator`
     keyword when initializing the field.

+    Note: To default the field to the current datetime, use: DateTimeField(default=datetime.utcnow)
+
     .. versionadded:: 0.5
     """

     def __init__(self, separator=',', **kwargs):
-        self.names = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond']
+        """
+        :param separator: Allows to customize the separator used for storage (default ``,``)
+        """
         self.separator = separator
         self.format = separator.join(['%Y', '%m', '%d', '%H', '%M', '%S', '%f'])
         super(ComplexDateTimeField, self).__init__(**kwargs)
@@ -588,20 +604,24 @@ class ComplexDateTimeField(StringField):
         >>> ComplexDateTimeField()._convert_from_string(a)
         datetime.datetime(2011, 6, 8, 20, 26, 24, 92284)
         """
-        values = map(int, data.split(self.separator))
+        values = [int(d) for d in data.split(self.separator)]
         return datetime.datetime(*values)

     def __get__(self, instance, owner):
+        if instance is None:
+            return self
+
         data = super(ComplexDateTimeField, self).__get__(instance, owner)
-        if data is None:
-            return None if self.null else datetime.datetime.now()
-        if isinstance(data, datetime.datetime):
+
+        if isinstance(data, datetime.datetime) or data is None:
             return data
         return self._convert_from_string(data)

     def __set__(self, instance, value):
-        value = self._convert_from_datetime(value) if value else value
-        return super(ComplexDateTimeField, self).__set__(instance, value)
+        super(ComplexDateTimeField, self).__set__(instance, value)
+        value = instance._data[self.name]
+        if value is not None:
+            instance._data[self.name] = self._convert_from_datetime(value)

     def validate(self, value):
         value = self.to_python(value)
@@ -645,9 +665,17 @@ class EmbeddedDocumentField(BaseField):
     def document_type(self):
         if isinstance(self.document_type_obj, six.string_types):
             if self.document_type_obj == RECURSIVE_REFERENCE_CONSTANT:
-                self.document_type_obj = self.owner_document
+                resolved_document_type = self.owner_document
             else:
-                self.document_type_obj = get_document(self.document_type_obj)
+                resolved_document_type = get_document(self.document_type_obj)
+
+            if not issubclass(resolved_document_type, EmbeddedDocument):
+                # Due to the late resolution of the document_type
+                # There is a chance that it won't be an EmbeddedDocument (#1661)
+                self.error('Invalid embedded document class provided to an '
+                           'EmbeddedDocumentField')
+            self.document_type_obj = resolved_document_type
+
         return self.document_type_obj

     def to_python(self, value):
@@ -824,8 +852,7 @@ class ListField(ComplexBaseField):

     def validate(self, value):
         """Make sure that a list of valid fields is being used."""
-        if (not isinstance(value, (list, tuple, QuerySet)) or
-                isinstance(value, six.string_types)):
+        if not isinstance(value, (list, tuple, BaseQuerySet)):
             self.error('Only lists and tuples may be used in a list field')
         super(ListField, self).validate(value)
@@ -932,14 +959,9 @@ class DictField(ComplexBaseField):
     .. versionchanged:: 0.5 - Can now handle complex / varying types of data
     """

-    def __init__(self, basecls=None, field=None, *args, **kwargs):
+    def __init__(self, field=None, *args, **kwargs):
         self.field = field
         self._auto_dereference = False
-        self.basecls = basecls or BaseField
-
-        # XXX ValidationError raised outside of the "validate" method.
-        if not issubclass(self.basecls, BaseField):
-            self.error('DictField only accepts dict values')

         kwargs.setdefault('default', lambda: {})
         super(DictField, self).__init__(*args, **kwargs)
@@ -959,7 +981,7 @@ class DictField(ComplexBaseField):
         super(DictField, self).validate(value)

     def lookup_member(self, member_name):
-        return DictField(basecls=self.basecls, db_field=member_name)
+        return DictField(db_field=member_name)

     def prepare_query_value(self, op, value):
         match_operators = ['contains', 'icontains', 'startswith',
@@ -969,7 +991,7 @@ class DictField(ComplexBaseField):
         if op in match_operators and isinstance(value, six.string_types):
             return StringField().prepare_query_value(op, value)

-        if hasattr(self.field, 'field'):
+        if hasattr(self.field, 'field'):  # Used for instance when using DictField(ListField(IntField()))
             if op in ('set', 'unset') and isinstance(value, dict):
                 return {
                     k: self.field.prepare_query_value(op, v)
@@ -1027,11 +1049,13 @@ class ReferenceField(BaseField):

     .. code-block:: python

-        class Bar(Document):
-            content = StringField()
-            foo = ReferenceField('Foo')
+        class Org(Document):
+            owner = ReferenceField('User')

-        Foo.register_delete_rule(Bar, 'foo', NULLIFY)
+        class User(Document):
+            org = ReferenceField('Org', reverse_delete_rule=CASCADE)
+
+        User.register_delete_rule(Org, 'owner', DENY)

     .. versionchanged:: 0.5 added `reverse_delete_rule`
     """
@@ -1079,9 +1103,9 @@ class ReferenceField(BaseField):

         # Get value from document instance if available
         value = instance._data.get(self.name)
-        self._auto_dereference = instance._fields[self.name]._auto_dereference
+        auto_dereference = instance._fields[self.name]._auto_dereference
         # Dereference DBRefs
-        if self._auto_dereference and isinstance(value, DBRef):
+        if auto_dereference and isinstance(value, DBRef):
             if hasattr(value, 'cls'):
                 # Dereference using the class type specified in the reference
                 cls = get_document(value.cls)
@@ -1152,16 +1176,6 @@ class ReferenceField(BaseField):
             self.error('You can only reference documents once they have been '
                        'saved to the database')

-        if (
-            self.document_type._meta.get('abstract') and
-            not isinstance(value, self.document_type)
-        ):
-            self.error(
-                '%s is not an instance of abstract reference type %s' % (
-                    self.document_type._class_name
-                )
-            )
-
     def lookup_member(self, member_name):
         return self.document_type._fields.get(member_name)
@@ -1242,9 +1256,10 @@ class CachedReferenceField(BaseField):

         # Get value from document instance if available
         value = instance._data.get(self.name)
-        self._auto_dereference = instance._fields[self.name]._auto_dereference
+        auto_dereference = instance._fields[self.name]._auto_dereference
+
         # Dereference DBRefs
-        if self._auto_dereference and isinstance(value, DBRef):
+        if auto_dereference and isinstance(value, DBRef):
             dereferenced = self.document_type._get_db().dereference(value)
             if dereferenced is None:
                 raise DoesNotExist('Trying to dereference unknown document %s' % value)
@@ -1377,8 +1392,8 @@ class GenericReferenceField(BaseField):

         value = instance._data.get(self.name)

-        self._auto_dereference = instance._fields[self.name]._auto_dereference
-        if self._auto_dereference and isinstance(value, (dict, SON)):
+        auto_dereference = instance._fields[self.name]._auto_dereference
+        if auto_dereference and isinstance(value, (dict, SON)):
             dereferenced = self.dereference(value)
             if dereferenced is None:
                 raise DoesNotExist('Trying to dereference unknown document %s' % value)
@@ -1460,10 +1475,10 @@ class BinaryField(BaseField):
         return Binary(value)

     def validate(self, value):
-        if not isinstance(value, (six.binary_type, six.text_type, Binary)):
+        if not isinstance(value, (six.binary_type, Binary)):
             self.error('BinaryField only accepts instances of '
                        '(%s, %s, Binary)' % (
-                           six.binary_type.__name__, six.text_type.__name__))
+                           six.binary_type.__name__, Binary.__name__))

         if self.max_bytes is not None and len(value) > self.max_bytes:
             self.error('Binary value is too long')
@@ -1508,9 +1523,11 @@ class GridFSProxy(object):
     def __get__(self, instance, value):
         return self

-    def __nonzero__(self):
+    def __bool__(self):
         return bool(self.grid_id)

+    __nonzero__ = __bool__  # For Py2 support
+
     def __getstate__(self):
         self_dict = self.__dict__
         self_dict['_fs'] = None
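
The `GridFSProxy` hunk above uses the standard cross-version truthiness idiom: Python 3 calls `__bool__`, Python 2 calls `__nonzero__`, and aliasing keeps a single implementation. Reduced to essentials:

    class Box(object):
        def __init__(self, items):
            self.items = items

        def __bool__(self):          # consulted by Python 3
            return bool(self.items)

        __nonzero__ = __bool__       # consulted by Python 2

    assert Box([1]) and not Box([])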
@@ -1850,12 +1867,9 @@ class ImageField(FileField):
|
||||
"""
|
||||
A Image File storage field.
|
||||
|
||||
@size (width, height, force):
|
||||
max size to store images, if larger will be automatically resized
|
||||
ex: size=(800, 600, True)
|
||||
|
||||
@thumbnail (width, height, force):
|
||||
size to generate a thumbnail
|
||||
:param size: max size to store images, provided as (width, height, force)
|
||||
if larger, it will be automatically resized (ex: size=(800, 600, True))
|
||||
:param thumbnail_size: size to generate a thumbnail, provided as (width, height, force)
|
||||
|
||||
.. versionadded:: 0.6
|
||||
"""
|
||||
@@ -1926,8 +1940,7 @@ class SequenceField(BaseField):
|
||||
self.collection_name = collection_name or self.COLLECTION_NAME
|
||||
self.db_alias = db_alias or DEFAULT_CONNECTION_NAME
|
||||
self.sequence_name = sequence_name
|
||||
self.value_decorator = (callable(value_decorator) and
|
||||
value_decorator or self.VALUE_DECORATOR)
|
||||
self.value_decorator = value_decorator if callable(value_decorator) else self.VALUE_DECORATOR
|
||||
super(SequenceField, self).__init__(*args, **kwargs)
|
||||
|
||||
def generate(self):
|
||||
@@ -2036,7 +2049,7 @@ class UUIDField(BaseField):
|
||||
if not isinstance(value, six.string_types):
|
||||
value = six.text_type(value)
|
||||
return uuid.UUID(value)
|
||||
except Exception:
|
||||
except (ValueError, TypeError, AttributeError):
|
||||
return original_value
|
||||
return value
|
||||
|
@@ -2058,7 +2071,7 @@ class UUIDField(BaseField):
            value = str(value)
        try:
            uuid.UUID(value)
-        except Exception as exc:
+        except (ValueError, TypeError, AttributeError) as exc:
            self.error('Could not convert to UUID: %s' % exc)
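A sketch of the exception narrowing above: `uuid.UUID` raises `ValueError` for malformed strings and `TypeError`/`AttributeError` for non-string input, whereas a bare `except Exception` would also have swallowed unrelated bugs raised inside the `try` block.

import uuid

for bad in ['not-a-uuid', 12345, None]:
    try:
        uuid.UUID(bad)
    except (ValueError, TypeError, AttributeError) as exc:
        print('rejected %r: %s' % (bad, exc))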
@@ -6,11 +6,7 @@ import pymongo
 import six


-if pymongo.version_tuple[0] < 3:
-    IS_PYMONGO_3 = False
-else:
-    IS_PYMONGO_3 = True
+IS_PYMONGO_3 = pymongo.version_tuple[0] >= 3

 # six.BytesIO resolves to StringIO.StringIO in Py2 and io.BytesIO in Py3.
 StringIO = six.BytesIO
@@ -23,3 +19,10 @@ if not six.PY3:
        pass
    else:
        StringIO = cStringIO.StringIO
+
+
+if six.PY3:
+    from collections.abc import Hashable
+else:
+    # raises DeprecationWarnings in Python >=3.7
+    from collections import Hashable
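A sketch of the guarded import added above in use: importing `Hashable` from `collections` (rather than `collections.abc`) warns on Python 3.7+ and fails outright on 3.10+, and Python 2 has no `collections.abc` module, so the branch keeps both interpreters clean.

import six

if six.PY3:
    from collections.abc import Hashable
else:
    from collections import Hashable  # collections.abc does not exist on Py2

print(isinstance('key', Hashable))  # True
print(isinstance([], Hashable))     # False -- lists are unhashable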
@@ -2,7 +2,6 @@ from __future__ import absolute_import

 import copy
-import itertools
 import operator
 import pprint
 import re
 import warnings
@@ -39,8 +38,6 @@ CASCADE = 2
 DENY = 3
 PULL = 4

-RE_TYPE = type(re.compile(''))
-

 class BaseQuerySet(object):
     """A set of results returned from a query. Wraps a MongoDB cursor,
@@ -209,14 +206,12 @@ class BaseQuerySet(object):
        queryset = self.order_by()
        return False if queryset.first() is None else True

-    def __nonzero__(self):
-        """Avoid to open all records in an if stmt in Py2."""
-        return self._has_data()
-
    def __bool__(self):
        """Avoid to open all records in an if stmt in Py3."""
        return self._has_data()

+    __nonzero__ = __bool__  # For Py2 support
+
    # Core functions

    def all(self):
@@ -269,13 +264,13 @@ class BaseQuerySet(object):
        queryset = queryset.filter(*q_objs, **query)

        try:
-            result = queryset.next()
+            result = six.next(queryset)
        except StopIteration:
            msg = ('%s matching query does not exist.'
                   % queryset._document._class_name)
            raise queryset._document.DoesNotExist(msg)
        try:
-            queryset.next()
+            six.next(queryset)
        except StopIteration:
            return result
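A sketch of the `six.next` calls introduced above: `six.next(it)` dispatches to `it.next()` on Python 2 and `it.__next__()` on Python 3, so call sites stay identical across interpreters.

import six

it = iter(['first', 'second'])
print(six.next(it))  # 'first'
print(six.next(it))  # 'second'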
@@ -359,7 +354,7 @@ class BaseQuerySet(object):

        try:
            inserted_result = insert_func(raw)
-            ids = return_one and [inserted_result.inserted_id] or inserted_result.inserted_ids
+            ids = [inserted_result.inserted_id] if return_one else inserted_result.inserted_ids
        except pymongo.errors.DuplicateKeyError as err:
            message = 'Could not save document (%s)'
            raise NotUniqueError(message % six.text_type(err))
@@ -377,17 +372,20 @@ class BaseQuerySet(object):
                raise NotUniqueError(message % six.text_type(err))
            raise OperationError(message % six.text_type(err))

+        # Apply inserted_ids to documents
+        for doc, doc_id in zip(docs, ids):
+            doc.pk = doc_id
+
        if not load_bulk:
            signals.post_bulk_insert.send(
                self._document, documents=docs, loaded=False, **signal_kwargs)
-            return return_one and ids[0] or ids
+            return ids[0] if return_one else ids

        documents = self.in_bulk(ids)
-        results = []
-        for obj_id in ids:
-            results.append(documents.get(obj_id))
+        results = [documents.get(obj_id) for obj_id in ids]
        signals.post_bulk_insert.send(
            self._document, documents=results, loaded=True, **signal_kwargs)
-        return return_one and results[0] or results
+        return results[0] if return_one else results

    def count(self, with_limit_and_skip=False):
        """Count the selected elements in the query.
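A sketch of the bookkeeping the new `zip` loop performs (plain dicts stand in for `Document` instances and driver results): each in-memory document is paired positionally with the `_id` the server generated for it.

docs = [{'name': 'a'}, {'name': 'b'}]  # stand-ins for Document instances
inserted_ids = ['id-1', 'id-2']        # stand-in for pymongo's inserted_ids
for doc, doc_id in zip(docs, inserted_ids):
    doc['pk'] = doc_id                 # the real code assigns doc.pk
print(docs)  # [{'name': 'a', 'pk': 'id-1'}, {'name': 'b', 'pk': 'id-2'}]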
@@ -396,9 +394,11 @@ class BaseQuerySet(object):
        :meth:`skip` that has been applied to this cursor into account when
        getting the count
        """
-        if self._limit == 0 and with_limit_and_skip or self._none:
+        if self._limit == 0 and with_limit_and_skip is False or self._none:
            return 0
-        return self._cursor.count(with_limit_and_skip=with_limit_and_skip)
+        count = self._cursor.count(with_limit_and_skip=with_limit_and_skip)
+        self._cursor_obj = None
+        return count

    def delete(self, write_concern=None, _from_doc_delete=False,
               cascade_refs=None):
@@ -775,10 +775,11 @@ class BaseQuerySet(object):
        """Limit the number of returned documents to `n`. This may also be
        achieved using array-slicing syntax (e.g. ``User.objects[:5]``).

-        :param n: the maximum number of objects to return
+        :param n: the maximum number of objects to return if n is greater than 0.
+            When 0 is passed, returns all the documents in the cursor
        """
        queryset = self.clone()
-        queryset._limit = n if n != 0 else 1
+        queryset._limit = n

        # If a cursor object has already been created, apply the limit to it.
        if queryset._cursor_obj:
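A usage sketch of the new `limit` semantics (hypothetical `User` model, assumes a local mongod to connect to): `0` is now passed through to the driver, where a cursor limit of 0 means "no limit", instead of being silently rewritten to 1.

from mongoengine import Document, StringField, connect

connect('testdb')  # illustrative database name

class User(Document):
    name = StringField()

first_five = User.objects.limit(5)  # at most five documents
everything = User.objects.limit(0)  # all matching documents since this change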
@@ -976,11 +977,10 @@ class BaseQuerySet(object):
        # explicitly included, and then more complicated operators such as
        # $slice.
        def _sort_key(field_tuple):
-            key, value = field_tuple
-            if isinstance(value, (int)):
+            _, value = field_tuple
+            if isinstance(value, int):
                return value  # 0 for exclusion, 1 for inclusion
-            else:
-                return 2  # so that complex values appear last
+            return 2  # so that complex values appear last

        fields = sorted(cleaned_fields, key=_sort_key)
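A runnable sketch of the ordering `_sort_key` produces after this cleanup: exclusions (0) sort first, inclusions (1) next, and complex projections such as `$slice` last.

def _sort_key(field_tuple):
    _, value = field_tuple
    if isinstance(value, int):
        return value  # 0 for exclusion, 1 for inclusion
    return 2          # complex values appear last

fields = [('comments', {'$slice': 5}), ('name', 1), ('secret', 0)]
print(sorted(fields, key=_sort_key))
# [('secret', 0), ('name', 1), ('comments', {'$slice': 5})]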
@@ -1477,13 +1477,13 @@ class BaseQuerySet(object):

    # Iterator helpers

-    def next(self):
+    def __next__(self):
        """Wrap the result in a :class:`~mongoengine.Document` object.
        """
        if self._limit == 0 or self._none:
            raise StopIteration

-        raw_doc = self._cursor.next()
+        raw_doc = six.next(self._cursor)

        if self._as_pymongo:
            return self._get_as_pymongo(raw_doc)
@@ -1497,6 +1497,8 @@ class BaseQuerySet(object):

        return doc

+    next = __next__  # For Python2 support
+
    def rewind(self):
        """Rewind the cursor to its unevaluated state.
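A minimal sketch of the iterator rename above: Python 3's iteration protocol calls `__next__` while Python 2 calls `next`, so the method is defined once and aliased.

class Countdown(object):
    def __init__(self, start):
        self.current = start

    def __iter__(self):
        return self

    def __next__(self):
        if self.current <= 0:
            raise StopIteration
        self.current -= 1
        return self.current + 1

    next = __next__  # For Python 2 support

print(list(Countdown(3)))  # [3, 2, 1]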
@@ -1872,8 +1874,8 @@ class BaseQuerySet(object):
            # Substitute the correct name for the field into the javascript
            return '.'.join([f.db_field for f in fields])

-        code = re.sub(u'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]', field_sub, code)
-        code = re.sub(u'\{\{\s*~([A-z_][A-z_0-9.]+?)\s*\}\}', field_path_sub,
+        code = re.sub(r'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]', field_sub, code)
+        code = re.sub(r'\{\{\s*~([A-z_][A-z_0-9.]+?)\s*\}\}', field_path_sub,
                      code)
        return code
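A sketch of why the patterns above switched from `u'...'` to `r'...'`: in a non-raw literal, sequences like `\s` are invalid string escapes (a `DeprecationWarning` on Python 3.6+), while a raw string hands the backslashes to the regex engine untouched.

import re

pattern = r'\[\s*~([A-z_][A-z_0-9.]+?)\s*\]'  # same pattern as above
print(re.sub(pattern, 'NAME', 'return this[ ~my_field ];'))
# prints: return thisNAME;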
@@ -63,9 +63,11 @@ class QueryFieldList(object):
        self._only_called = True
        return self

-    def __nonzero__(self):
+    def __bool__(self):
        return bool(self.fields)

+    __nonzero__ = __bool__  # For Py2 support
+
    def as_dict(self):
        field_list = {field: self.value for field in self.fields}
        if self.slice:
@@ -36,7 +36,7 @@ class QuerySetManager(object):
            queryset_class = owner._meta.get('queryset_class', self.default)
            queryset = queryset_class(owner, owner._get_collection())
            if self.get_queryset:
-                arg_count = self.get_queryset.func_code.co_argcount
+                arg_count = self.get_queryset.__code__.co_argcount
                if arg_count == 1:
                    queryset = self.get_queryset(queryset)
                elif arg_count == 2:
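A sketch of the portability fix above: `func.func_code` was removed in Python 3, while `func.__code__` exists on both Python 2.6+ and 3.x, so arity introspection keeps working.

def get_queryset(queryset, doc_cls):
    return queryset

print(get_queryset.__code__.co_argcount)  # 2 on both Python 2 and 3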
@@ -89,7 +89,7 @@ class QuerySet(BaseQuerySet):
                yield self._result_cache[pos]
                pos += 1

-            # Raise StopIteration if we already established there were no more
+            # return if we already established there were no more
            # docs in the db cursor.
            if not self._has_more:
                return
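A sketch of the generator convention the updated comment refers to: under PEP 479 (default behavior since Python 3.7), a `StopIteration` that escapes a generator body is converted to `RuntimeError`, so a bare `return` is the safe way to end iteration.

def gen():
    if True:
        return  # ends the generator cleanly
    yield 1     # the yield makes this function a generator

print(list(gen()))  # []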
@@ -115,7 +115,7 @@ class QuerySet(BaseQuerySet):
            # the result cache.
            try:
                for _ in six.moves.range(ITER_CHUNK_SIZE):
-                    self._result_cache.append(self.next())
+                    self._result_cache.append(six.next(self))
            except StopIteration:
                # Getting this exception means there are no more docs in the
                # db cursor. Set _has_more to False so that we can use that
@@ -170,7 +170,7 @@ class QuerySetNoCache(BaseQuerySet):
        data = []
        for _ in six.moves.range(REPR_OUTPUT_SIZE + 1):
            try:
-                data.append(self.next())
+                data.append(six.next(self))
            except StopIteration:
                break
@@ -186,10 +186,3 @@ class QuerySetNoCache(BaseQuerySet):
        queryset = self.clone()
        queryset.rewind()
        return queryset
-
-
-class QuerySetNoDeRef(QuerySet):
-    """Special no_dereference QuerySet"""
-
-    def __dereference(items, max_depth=1, instance=None, name=None):
-        return items
@@ -147,7 +147,7 @@ def query(_doc_cls=None, **kwargs):
        if op is None or key not in mongo_query:
            mongo_query[key] = value
        elif key in mongo_query:
-            if isinstance(mongo_query[key], dict):
+            if isinstance(mongo_query[key], dict) and isinstance(value, dict):
                mongo_query[key].update(value)
                # $max/minDistance needs to come last - convert to SON
                value_dict = mongo_query[key]
@@ -201,30 +201,37 @@ def update(_doc_cls=None, **update):
    format.
    """
    mongo_update = {}

    for key, value in update.items():
        if key == '__raw__':
            mongo_update.update(value)
            continue

        parts = key.split('__')

        # if there is no operator, default to 'set'
        if len(parts) < 3 and parts[0] not in UPDATE_OPERATORS:
            parts.insert(0, 'set')

        # Check for an operator and transform to mongo-style if there is
        op = None
        if parts[0] in UPDATE_OPERATORS:
            op = parts.pop(0)
            # Convert Pythonic names to Mongo equivalents
-            if op in ('push_all', 'pull_all'):
-                op = op.replace('_all', 'All')
-            elif op == 'dec':
+            operator_map = {
+                'push_all': 'pushAll',
+                'pull_all': 'pullAll',
+                'dec': 'inc',
+                'add_to_set': 'addToSet',
+                'set_on_insert': 'setOnInsert'
+            }
+            if op == 'dec':
                # Support decrement by flipping a positive value's sign
                # and using 'inc'
-                op = 'inc'
                value = -value
-            elif op == 'add_to_set':
-                op = 'addToSet'
-            elif op == 'set_on_insert':
-                op = 'setOnInsert'
+            # If the operator isn't found in the operator map, the op value
+            # will stay unchanged
+            op = operator_map.get(op, op)

        match = None
        if parts[-1] in COMPARISON_OPERATORS:
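A runnable sketch of the table-driven rename that replaced the `if`/`elif` chain above: unknown operators fall through unchanged via `dict.get`.

operator_map = {
    'push_all': 'pushAll',
    'pull_all': 'pullAll',
    'dec': 'inc',
    'add_to_set': 'addToSet',
    'set_on_insert': 'setOnInsert',
}

for op in ('push_all', 'dec', 'set', 'rename'):
    print('%s -> %s' % (op, operator_map.get(op, op)))
# push_all -> pushAll, dec -> inc, set -> set, rename -> rename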
@@ -291,6 +298,8 @@ def update(_doc_cls=None, **update):
                value = field.prepare_query_value(op, value)
            elif op == 'unset':
                value = 1
+            elif op == 'inc':
+                value = field.prepare_query_value(op, value)

        if match:
            match = '$' + match
@@ -336,7 +345,7 @@ def update(_doc_cls=None, **update):
            value = {key: {'$each': value}}
        elif op in ('push', 'pushAll'):
            if parts[-1].isdigit():
-                key = parts[0]
+                key = '.'.join(parts[0:-1])
                position = int(parts[-1])
                # $position expects an iterable. If pushing a single value,
                # wrap it in a list.
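A sketch of the key fix above for positional pushes: for a nested key like `doc__tags__0`, `parts[0]` dropped the middle segments, while joining everything but the trailing index yields the dotted path MongoDB expects.

parts = 'doc__tags__0'.split('__')
print(parts[0])               # 'doc'      -- old behavior, loses 'tags'
print('.'.join(parts[0:-1]))  # 'doc.tags' -- fixed key
print(int(parts[-1]))         # 0          -- the $position index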
@@ -420,7 +429,6 @@ def _infer_geometry(value):
                           'type and coordinates keys')
    elif isinstance(value, (list, set)):
        # TODO: shouldn't we test value[0][0][0][0] to see if it is MultiPolygon?
-        # TODO: should both TypeError and IndexError be alike interpreted?

        try:
            value[0][0][0]
@@ -3,7 +3,7 @@ import copy
 from mongoengine.errors import InvalidQueryError
 from mongoengine.queryset import transform

-__all__ = ('Q',)
+__all__ = ('Q', 'QNode')


 class QNodeVisitor(object):
@@ -131,6 +131,10 @@ class QCombination(QNode):
        else:
            self.children.append(node)

+    def __repr__(self):
+        op = ' & ' if self.operation is self.AND else ' | '
+        return '(%s)' % op.join([repr(node) for node in self.children])
+
    def accept(self, visitor):
        for i in range(len(self.children)):
            if isinstance(self.children[i], QNode):
@@ -151,6 +155,9 @@ class Q(QNode):
    def __init__(self, **query):
        self.query = query

+    def __repr__(self):
+        return 'Q(**%s)' % repr(self.query)
+
    def accept(self, visitor):
        return visitor.visit_query(self)
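A usage sketch of the `__repr__` methods added above (assumes mongoengine with this patch applied): combinations render with the operator that built them.

from mongoengine import Q

print(repr(Q(name='Ross')))
# Q(**{'name': 'Ross'})
print(repr(Q(age__gte=18) & Q(name='Ross')))
# (Q(**{'age__gte': 18}) & Q(**{'name': 'Ross'}))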