Added FutureWarning - save will default to cascade=False in 0.8

This commit is contained in:
parent 999d4a7676
commit b1eeb77ddc
@@ -4,6 +4,7 @@ Changelog
 
 Changes in 0.7.X
 =================
+- Added FutureWarning - save will default to `cascade=False` in 0.8
 - Added example of indexing embedded document fields (MongoEngine/mongoengine#75)
 - Fixed ImageField resizing when forcing size (MongoEngine/mongoengine#80)
 - Add flexibility for fields handling bad data (MongoEngine/mongoengine#78)
@@ -99,7 +100,7 @@ Changes in 0.6.8
 ================
 - Fixed FileField losing reference when no default set
 - Removed possible race condition from FileField (grid_file)
-- Added assignment to save, can now do: b = MyDoc(**kwargs).save()
+- Added assignment to save, can now do: `b = MyDoc(**kwargs).save()`
 - Added support for pull operations on nested EmbeddedDocuments
 - Added support for choices with GenericReferenceFields
 - Added support for choices with GenericEmbeddedDocumentFields
@@ -2,6 +2,20 @@
 Upgrading
 =========
 
+0.6 to 0.7
+==========
+
+Saves will raise a `FutureWarning` if they cascade and cascade hasn't been set
+to True. This is because in 0.8 it will default to False. If you require
+cascading saves then either set it in the `meta` or pass via `save` ::
+
+    # At the class level:
+    class Person(Document):
+        meta = {'cascade': True}
+
+    # Or in code:
+    my_document.save(cascade=True)
+
 0.5 to 0.6
 ==========
 
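For projects auditing this change during the upgrade, the standard library's `warnings` filters can escalate or silence the new `FutureWarning`; a suggested sketch, not part of this commit ::

    import warnings

    # fail loudly wherever save() would still cascade implicitly
    warnings.simplefilter("error", FutureWarning)

    # or, once cascade has been set explicitly everywhere, hide the warning
    # warnings.simplefilter("ignore", FutureWarning)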
@@ -1,15 +1,18 @@
+import warnings
+
 import pymongo
 
 from bson.dbref import DBRef
 
 from mongoengine import signals, queryset
 
 from base import (DocumentMetaclass, TopLevelDocumentMetaclass, BaseDocument,
                   BaseDict, BaseList)
 from queryset import OperationError
 from connection import get_db, DEFAULT_CONNECTION_NAME
 
 __all__ = ['Document', 'EmbeddedDocument', 'DynamicDocument',
-           'DynamicEmbeddedDocument', 'OperationError', 'InvalidCollectionError']
+           'DynamicEmbeddedDocument', 'OperationError',
+           'InvalidCollectionError']
 
 
 class InvalidCollectionError(Exception):
@@ -134,8 +137,9 @@ class Document(BaseDocument):
                     options = cls._collection.options()
                     if options.get('max') != max_documents or \
                        options.get('size') != max_size:
-                        msg = ('Cannot create collection "%s" as a capped '
-                               'collection as it already exists') % cls._collection
+                        msg = (('Cannot create collection "%s" as a capped '
+                                'collection as it already exists')
+                               % cls._collection)
                         raise InvalidCollectionError(msg)
                 else:
                     # Create the collection as a capped collection
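The `InvalidCollectionError` above fires when an existing collection's capped options disagree with the document's `meta`. For context, a capped document is declared with the same `max_documents` / `max_size` keys used in this hunk; a minimal sketch with an illustrative model ::

    # assuming: from mongoengine import Document, StringField
    class LogEntry(Document):
        message = StringField()
        meta = {
            'max_documents': 1000,   # cap on the number of documents
            'max_size': 2000000,     # cap on collection size, in bytes
        }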
@@ -149,8 +153,9 @@ class Document(BaseDocument):
                 cls._collection = db[collection_name]
         return cls._collection
 
-    def save(self, safe=True, force_insert=False, validate=True, write_options=None,
-             cascade=None, cascade_kwargs=None, _refs=None):
+    def save(self, safe=True, force_insert=False, validate=True,
+             write_options=None, cascade=None, cascade_kwargs=None,
+             _refs=None):
         """Save the :class:`~mongoengine.Document` to the database. If the
         document already exists, it will be updated, otherwise it will be
         created.
@@ -163,27 +168,30 @@ class Document(BaseDocument):
             updates of existing documents
         :param validate: validates the document; set to ``False`` to skip.
         :param write_options: Extra keyword arguments are passed down to
                 :meth:`~pymongo.collection.Collection.save` OR
                 :meth:`~pymongo.collection.Collection.insert`
-                which will be used as options for the resultant ``getLastError`` command.
-                For example, ``save(..., write_options={w: 2, fsync: True}, ...)`` will
-                wait until at least two servers have recorded the write and will force an
-                fsync on each server being written to.
-        :param cascade: Sets the flag for cascading saves. You can set a default by setting
-                "cascade" in the document __meta__
-        :param cascade_kwargs: optional kwargs dictionary to be passed throw to cascading saves
+                which will be used as options for the resultant
+                ``getLastError`` command. For example,
+                ``save(..., write_options={w: 2, fsync: True}, ...)`` will
+                wait until at least two servers have recorded the write and
+                will force an fsync on the primary server.
+        :param cascade: Sets the flag for cascading saves. You can set a
+                default by setting "cascade" in the document __meta__
+        :param cascade_kwargs: optional kwargs dictionary to be passed through
+                to cascading saves
         :param _refs: A list of processed references used in cascading saves
 
         .. versionchanged:: 0.5
-            In existing documents it only saves changed fields using set / unset
-            Saves are cascaded and any :class:`~bson.dbref.DBRef` objects
-            that have changes are saved as well.
+            In existing documents it only saves changed fields using
+            set / unset. Saves are cascaded and any
+            :class:`~bson.dbref.DBRef` objects that have changes are
+            saved as well.
         .. versionchanged:: 0.6
-            Cascade saves are optional = defaults to True, if you want fine grain
-            control then you can turn off using document meta['cascade'] = False
-            Also you can pass different kwargs to the cascade save using cascade_kwargs
-            which overwrites the existing kwargs with custom values
+            Cascade saves are optional and default to True. If you want
+            fine-grained control then you can turn them off using document
+            meta['cascade'] = False. You can also pass different kwargs to
+            the cascade save using cascade_kwargs, which overwrites the
+            existing kwargs with custom values.
         """
         signals.pre_save.send(self.__class__, document=self)
 
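As the docstring above notes, `write_options` is forwarded to the underlying pymongo call and ends up as `getLastError` options. A usage sketch, assuming `post` is an already-constructed document instance ::

    # wait for two members to acknowledge the write and force an fsync
    post.save(write_options={'w': 2, 'fsync': True})

    # the same options can be combined with an explicit cascade choice
    post.save(cascade=True, write_options={'w': 2})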
@@ -201,9 +209,11 @@ class Document(BaseDocument):
             collection = self.__class__.objects._collection
             if created:
                 if force_insert:
-                    object_id = collection.insert(doc, safe=safe, **write_options)
+                    object_id = collection.insert(doc, safe=safe,
+                                                  **write_options)
                 else:
-                    object_id = collection.save(doc, safe=safe, **write_options)
+                    object_id = collection.save(doc, safe=safe,
+                                                **write_options)
             else:
                 object_id = doc['_id']
                 updates, removals = self._delta()
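The branch above is where `force_insert` changes behaviour: the two legacy pymongo calls treat an existing `_id` differently. A sketch at the pymongo level, with `collection` assumed to be a `pymongo.collection.Collection` ::

    collection.save({'_id': 1, 'name': 'x'}, safe=True)    # upsert: creates or replaces document 1
    collection.insert({'_id': 1, 'name': 'y'}, safe=True)  # fails with a duplicate-key OperationFailure

mongoengine catches the `OperationFailure` further down and re-raises it as `OperationError`, which is what makes `save(force_insert=True)` a way to assert the document is genuinely new.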
@@ -216,11 +226,15 @@ class Document(BaseDocument):
 
                 upsert = self._created
                 if updates:
-                    collection.update(select_dict, {"$set": updates}, upsert=upsert, safe=safe, **write_options)
+                    collection.update(select_dict, {"$set": updates},
+                                      upsert=upsert, safe=safe, **write_options)
                 if removals:
-                    collection.update(select_dict, {"$unset": removals}, upsert=upsert, safe=safe, **write_options)
+                    collection.update(select_dict, {"$unset": removals},
+                                      upsert=upsert, safe=safe, **write_options)
 
-            cascade = self._meta.get('cascade', True) if cascade is None else cascade
+            warn_cascade = not cascade and 'cascade' not in self._meta
+            cascade = (self._meta.get('cascade', True)
+                       if cascade is None else cascade)
             if cascade:
                 kwargs = {
                     "safe": safe,
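The new `warn_cascade` flag is meant to fire only for callers relying on the implicit default, i.e. no `cascade` argument and no `'cascade'` key in `meta`. A standalone restatement of that logic, as a hypothetical helper for illustration only ::

    def _resolve_cascade(cascade_arg, meta):
        # mirrors the two lines above: warn only when neither the call site
        # nor the document meta made an explicit choice
        warn = not cascade_arg and 'cascade' not in meta
        cascade = meta.get('cascade', True) if cascade_arg is None else cascade_arg
        return warn, cascade

    _resolve_cascade(None, {})                 # (True, True)  - cascades, will warn
    _resolve_cascade(True, {})                 # (False, True) - explicit, no warning
    _resolve_cascade(None, {'cascade': True})  # (False, True) - configured, no warning
    _resolve_cascade(False, {})                # (True, False) - never cascades, so no warning is emitted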
@@ -232,8 +246,7 @@ class Document(BaseDocument):
                 if cascade_kwargs:  # Allow granular control over cascades
                     kwargs.update(cascade_kwargs)
                 kwargs['_refs'] = _refs
-                #self._changed_fields = []
-                self.cascade_save(**kwargs)
+                self.cascade_save(warn_cascade=warn_cascade, **kwargs)
 
         except pymongo.errors.OperationFailure, err:
             message = 'Could not save document (%s)'
@@ -249,23 +262,27 @@ class Document(BaseDocument):
         signals.post_save.send(self.__class__, document=self, created=created)
         return self
 
-    def cascade_save(self, *args, **kwargs):
-        """Recursively saves any references / generic references on an object"""
+    def cascade_save(self, warn_cascade=None, *args, **kwargs):
+        """Recursively saves any references /
+        generic references on an object"""
         import fields
         _refs = kwargs.get('_refs', []) or []
 
         for name, cls in self._fields.items():
-            if not isinstance(cls, (fields.ReferenceField, fields.GenericReferenceField)):
+            if not isinstance(cls, (fields.ReferenceField,
+                                    fields.GenericReferenceField)):
                 continue
 
             ref = getattr(self, name)
-            if not ref:
-                continue
-            if isinstance(ref, DBRef):
+            if not ref or isinstance(ref, DBRef):
                 continue
 
             ref_id = "%s,%s" % (ref.__class__.__name__, str(ref._data))
             if ref and ref_id not in _refs:
+                if warn_cascade:
+                    msg = ("Cascading saves will default to off in 0.8, "
+                           "please explicitly set `.save(cascade=True)`")
+                    warnings.warn(msg, FutureWarning)
                 _refs.append(ref_id)
                 kwargs["_refs"] = _refs
                 ref.save(**kwargs)
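In practice the warning surfaces on saves that follow references. A minimal illustration with made-up models, not part of the commit ::

    # assuming: from mongoengine import Document, StringField, ReferenceField
    class Author(Document):
        name = StringField()

    class Book(Document):
        title = StringField()
        author = ReferenceField(Author)

    author = Author(name="A. Writer").save()
    book = Book(title="Example", author=author)

    book.save()              # cascades into `author`; with no explicit setting,
                             # cascade_save now emits the FutureWarning
    book.save(cascade=True)  # explicit choice: cascades without warning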
@@ -1508,6 +1508,40 @@ class DocumentTest(unittest.TestCase):
         p1.reload()
         self.assertEqual(p1.name, p.parent.name)
 
+    def test_cascade_warning(self):
+
+        self.warning_list = []
+        showwarning_default = warnings.showwarning
+
+        def append_to_warning_list(message, category, *args):
+            self.warning_list.append({"message": message,
+                                      "category": category})
+
+        # add warnings to self.warning_list instead of stderr
+        warnings.showwarning = append_to_warning_list
+
+        class Person(Document):
+            name = StringField()
+            parent = ReferenceField('self')
+
+        Person.drop_collection()
+
+        p1 = Person(name="Wilson Snr")
+        p1.parent = None
+        p1.save()
+
+        p2 = Person(name="Wilson Jr")
+        p2.parent = p1
+        p2.save()
+
+        # restore default handling of warnings
+        warnings.showwarning = showwarning_default
+        self.assertEqual(len(self.warning_list), 1)
+        warning = self.warning_list[0]
+        self.assertEqual(FutureWarning, warning["category"])
+        self.assertTrue("Cascading saves will default to off in 0.8"
+                        in str(warning["message"]))
+
     def test_save_cascade_kwargs(self):
 
         class Person(Document):
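The test above captures warnings by monkey-patching `warnings.showwarning`. On Python 2.6+ the same check could be written with the standard context manager; a sketch of an equivalent assertion, not part of the commit ::

    def test_cascade_warning_catch_warnings(self):
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')

        Person.drop_collection()
        p1 = Person(name="Wilson Snr").save()
        p2 = Person(name="Wilson Jr", parent=p1)

        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")  # defeat the once-per-location filter
            p2.save()

        self.assertTrue(any(issubclass(w.category, FutureWarning)
                            for w in caught))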