diff --git a/docs/changelog.rst b/docs/changelog.rst index 45470000..d7d010cb 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -4,6 +4,7 @@ Changelog Changes in 0.8.X ================ +- Updated connection to use MongoClient (#262, #274) - Fixed db_alias and inherited Documents (#143) - Documentation update for document errors (#124) - Deprecated `get_or_create` (#35) diff --git a/docs/guide/connecting.rst b/docs/guide/connecting.rst index ebd61a97..de6794cd 100644 --- a/docs/guide/connecting.rst +++ b/docs/guide/connecting.rst @@ -29,7 +29,7 @@ name - just supply the uri as the :attr:`host` to ReplicaSets =========== -MongoEngine now supports :func:`~pymongo.replica_set_connection.ReplicaSetConnection` +MongoEngine supports :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`; to use them, please use a URI style connection and provide the `replicaSet` name in the connection kwargs. diff --git a/docs/upgrade.rst b/docs/upgrade.rst index 8724503d..356f5109 100644 --- a/docs/upgrade.rst +++ b/docs/upgrade.rst @@ -1,15 +1,15 @@ -========= +######### Upgrading -========= +######### 0.7 to 0.8 -========== +********** Inheritance ------------ +=========== Data Model -~~~~~~~~~~ +---------- The inheritance model has changed: we no longer need to store an array of :attr:`types` with the model; we can just use the class name in :attr:`_cls`. @@ -44,7 +44,7 @@ inherited classes like so: :: Document Definition -~~~~~~~~~~~~~~~~~~~ +------------------- The default for inheritance has changed - it's now off by default and :attr:`_cls` will not be stored automatically with the class. So if you extend @@ -77,7 +77,7 @@ the case and the data is set only in the ``document._data`` dictionary: :: AttributeError: 'Animal' object has no attribute 'size' Querysets -~~~~~~~~~ +========= Querysets now return clones and should no longer be considered editable in place. This brings us in line with how Django's querysets work and removes a @@ -98,8 +98,47 @@ update your code like so: :: mammals = Animal.objects(type="mammal").filter(order="Carnivora") # The final queryset is assigned to mammals [m for m in mammals] # This will return all carnivores +Client +====== +PyMongo 2.4 came with a new connection client, MongoClient_, and started the +deprecation of the old :class:`~pymongo.connection.Connection`. MongoEngine +now uses `MongoClient` for connections. By default operations were +`safe`, but if you turned them off or used the connection directly this will +impact your queries. + +Querysets +--------- + +Safe +^^^^ + +`safe` has been deprecated in the new MongoClient connection. Please use +`write_concern` instead. As `safe` always defaulted to `True`, normally no code +change is required. To disable confirmation of the write just pass `{"w": 0}`, +e.g.: :: + + # Old code: + Animal(name="Dinosaur").save(safe=False) + + # New code: + Animal(name="Dinosaur").save(write_concern={"w": 0}) + +Write Concern +^^^^^^^^^^^^^ + +`write_options` has been replaced with `write_concern` to bring it in line with +PyMongo. To upgrade, simply rename any instances where you used the `write_options` +keyword to `write_concern`, like so:: + + # Old code: + Animal(name="Dinosaur").save(write_options={"w": 2}) + + # New code: + Animal(name="Dinosaur").save(write_concern={"w": 2}) + + Indexes -------- +======= Index methods are no longer tied to querysets but rather to the document class. Although `QuerySet._ensure_indexes` and `QuerySet.ensure_index` still exist,
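For reference, a minimal sketch of the URI style replica set connection the connecting guide above describes (the host names and the ``rs0`` set name are illustrative assumptions, not values from this patch): ::

    from mongoengine import connect

    # Supplying `replicaSet` in the kwargs makes get_connection() choose
    # MongoReplicaSetClient instead of MongoClient
    connect('mydb', host='mongodb://host1:27017,host2:27017/mydb',
            replicaSet='rs0')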
@@ -107,17 +146,19 @@ they should be replaced with :func:`~mongoengine.Document.ensure_indexes` / :func:`~mongoengine.Document.ensure_index`. SequenceFields --------------- +============== :class:`~mongoengine.fields.SequenceField` now inherits from `BaseField` to allow flexible storage of the calculated value. As such, MIN and MAX settings are no longer handled. +.. _MongoClient: http://blog.mongodb.org/post/36666163412/introducing-mongoclient + 0.6 to 0.7 -========== +********** Cascade saves -------------- +============= Saves will raise a `FutureWarning` if they cascade and cascade hasn't been set to True. This is because in 0.8 it will default to False. If you require @@ -135,7 +176,7 @@ via `save`, e.g. :: Remember: cascading saves **do not** cascade through lists. ReferenceFields ---------------- +=============== ReferenceFields can now store references as ObjectId strings instead of DBRefs. This will become the default in 0.8 and if `dbref` is not set a `FutureWarning` @@ -164,7 +205,7 @@ migrate :: item_frequencies ----------------- +================ In the 0.6 series we added support for null / zero / false values in item_frequencies. A side effect was to return keys in the value they are @@ -173,14 +214,14 @@ updated to handle native types rather than string keys for the results of item frequency queries. BinaryFields ------------- +============ Binary fields have been updated so that they are native binary types. If you were previously doing `str` comparisons with binary field values you will have to update and wrap the value in a `str`. 0.5 to 0.6 -========== +********** Embedded Documents - if you had a `pk` field you will have to rename it from `_id` to `pk` as pk is no longer a property of Embedded Documents. @@ -200,13 +241,13 @@ don't define :attr:`allow_inheritance` in their meta. You may need to update PyMongo to 2.0 for use with Sharding. 0.4 to 0.5 -=========== +********** There have been the following backwards incompatibilities from 0.4 to 0.5. The main areas of change are: choices in fields, map_reduce, and collection names. Choice options: ---------------- +=============== Are now expected to be an iterable of tuples, with the first element in each tuple being the actual value to be stored. The second element is the @@ -214,7 +255,7 @@ human-readable name for the option. PyMongo / MongoDB ------------------ +================= map_reduce now requires PyMongo 1.11+. The PyMongo `merge_output` and `reduce_output` parameters have been deprecated.
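To illustrate the choices format described above, a minimal sketch (the ``Shirt`` document and its values are hypothetical, not part of this patch): ::

    from mongoengine import Document, StringField

    class Shirt(Document):
        # The first element of each tuple is the value stored in the
        # database; the second is the human-readable name for the option
        size = StringField(choices=[('S', 'Small'), ('M', 'Medium'),
                                    ('L', 'Large')])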
@@ -228,7 +269,7 @@ such the following have been changed: Default collection naming -------------------------- +========================= Previously it was just lowercase; it's now much more Pythonic and readable as it's lowercase with underscores. Previously :: diff --git a/mongoengine/connection.py b/mongoengine/connection.py index a47be446..3c53ea3c 100644 --- a/mongoengine/connection.py +++ b/mongoengine/connection.py @@ -1,5 +1,5 @@ import pymongo -from pymongo import Connection, ReplicaSetConnection, uri_parser +from pymongo import MongoClient, MongoReplicaSetClient, uri_parser __all__ = ['ConnectionError', 'connect', 'register_connection', @@ -112,15 +112,15 @@ def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False): conn_settings['slaves'] = slaves conn_settings.pop('read_preference', None) - connection_class = Connection + connection_class = MongoClient if 'replicaSet' in conn_settings: conn_settings['hosts_or_uri'] = conn_settings.pop('host', None) - # Discard port since it can't be used on ReplicaSetConnection + # Discard port since it can't be used on MongoReplicaSetClient conn_settings.pop('port', None) # Discard replicaSet if not base string if not isinstance(conn_settings['replicaSet'], basestring): conn_settings.pop('replicaSet', None) - connection_class = ReplicaSetConnection + connection_class = MongoReplicaSetClient try: _connections[alias] = connection_class(**conn_settings) diff --git a/mongoengine/django/sessions.py b/mongoengine/django/sessions.py index 0d199a6c..29583f5c 100644 --- a/mongoengine/django/sessions.py +++ b/mongoengine/django/sessions.py @@ -88,7 +88,7 @@ class SessionStore(SessionBase): s.session_data = self._get_session(no_load=must_create) s.expire_date = self.get_expiry_date() try: - s.save(force_insert=must_create, safe=True) + s.save(force_insert=must_create) except OperationError: if must_create: raise CreateError diff --git a/mongoengine/document.py b/mongoengine/document.py index 9057075e..54b55df9 100644 --- a/mongoengine/document.py +++ b/mongoengine/document.py @@ -142,7 +142,7 @@ class Document(BaseDocument): options.get('size') != max_size: msg = (('Cannot create collection "%s" as a capped ' 'collection as it already exists') - % cls._collection) + % cls._collection) raise InvalidCollectionError(msg) else: # Create the collection as a capped collection @@ -158,28 +158,24 @@ class Document(BaseDocument): cls.ensure_indexes() return cls._collection - def save(self, safe=True, force_insert=False, validate=True, clean=True, - write_options=None, cascade=None, cascade_kwargs=None, + def save(self, force_insert=False, validate=True, clean=True, + write_concern=None, cascade=None, cascade_kwargs=None, _refs=None, **kwargs): """Save the :class:`~mongoengine.Document` to the database. If the document already exists, it will be updated; otherwise it will be created. - If ``safe=True`` and the operation is unsuccessful, an - :class:`~mongoengine.OperationError` will be raised. - - :param safe: check if the operation succeeded before returning :param force_insert: only try to create a new document, don't allow updates of existing documents :param validate: validates the document; set to ``False`` to skip. :param clean: call the document clean method, requires `validate` to be True.
- :param write_options: Extra keyword arguments are passed down to + :param write_concern: Extra keyword arguments are passed down to :meth:`~pymongo.collection.Collection.save` OR :meth:`~pymongo.collection.Collection.insert` which will be used as options for the resultant ``getLastError`` command. For example, - ``save(..., write_options={w: 2, fsync: True}, ...)`` will + ``save(..., write_concern={"w": 2, "fsync": True}, ...)`` will wait until at least two servers have recorded the write and will force an fsync on the primary server. :param cascade: Sets the flag for cascading saves. You can set a @@ -205,8 +201,8 @@ class Document(BaseDocument): if validate: self.validate(clean=clean) - if not write_options: - write_options = {} + if not write_concern: + write_concern = {} doc = self.to_mongo() @@ -216,11 +212,9 @@ class Document(BaseDocument): collection = self._get_collection() if created: if force_insert: - object_id = collection.insert(doc, safe=safe, - **write_options) + object_id = collection.insert(doc, **write_concern) else: - object_id = collection.save(doc, safe=safe, - **write_options) + object_id = collection.save(doc, **write_concern) else: object_id = doc['_id'] updates, removals = self._delta() @@ -247,7 +241,7 @@ class Document(BaseDocument): update_query["$unset"] = removals if updates or removals: last_error = collection.update(select_dict, update_query, - upsert=upsert, safe=safe, **write_options) + upsert=upsert, **write_concern) created = is_new_object(last_error) warn_cascade = not cascade and 'cascade' not in self._meta @@ -255,10 +249,9 @@ class Document(BaseDocument): if cascade is None else cascade) if cascade: kwargs = { - "safe": safe, "force_insert": force_insert, "validate": validate, - "write_options": write_options, + "write_concern": write_concern, "cascade": cascade } if cascade_kwargs: # Allow granular control over cascades @@ -305,7 +298,7 @@ class Document(BaseDocument): if ref and ref_id not in _refs: if warn_cascade: msg = ("Cascading saves will default to off in 0.8, " - "please explicitly set `.save(cascade=True)`") + "please explicitly set `.save(cascade=True)`") warnings.warn(msg, FutureWarning) _refs.append(ref_id) kwargs["_refs"] = _refs @@ -344,16 +337,21 @@ class Document(BaseDocument): # Need to add shard key to query, or you get an error return self._qs.filter(**self._object_key).update_one(**kwargs) - def delete(self, safe=False): + def delete(self, **write_concern): """Delete the :class:`~mongoengine.Document` from the database. This will only take effect if the document has been previously saved. - :param safe: check if the operation succeeded before returning + :param write_concern: Extra keyword arguments are passed down which + will be used as options for the resultant + ``getLastError`` command. For example, + ``delete(w=2, fsync=True)`` will + wait until at least two servers have recorded the write and + will force an fsync on the primary server. """ signals.pre_delete.send(self.__class__, document=self) try: - self._qs.filter(**self._object_key).delete(safe=safe) + self._qs.filter(**self._object_key).delete(write_concern=write_concern) except pymongo.errors.OperationFailure, err: message = u'Could not delete document (%s)' % err.message raise OperationError(message) @@ -428,9 +426,8 @@ class Document(BaseDocument): ..
versionchanged:: 0.6 Now chainable """ id_field = self._meta['id_field'] - obj = self._qs.filter( - **{id_field: self[id_field]} - ).limit(1).select_related(max_depth=max_depth) + obj = self._qs.filter(**{id_field: self[id_field]} + ).limit(1).select_related(max_depth=max_depth) if obj: obj = obj[0] else: diff --git a/mongoengine/queryset/queryset.py b/mongoengine/queryset/queryset.py index 15c8e634..71332b92 100644 --- a/mongoengine/queryset/queryset.py +++ b/mongoengine/queryset/queryset.py @@ -221,7 +221,7 @@ class QuerySet(object): """ return self._document(**kwargs).save() - def get_or_create(self, write_options=None, auto_save=True, + def get_or_create(self, write_concern=None, auto_save=True, *q_objs, **query): """Retrieve unique object or create, if it doesn't exist. Returns a tuple of ``(object, created)``, where ``object`` is the retrieved or @@ -239,9 +239,9 @@ don't accidentally duplicate data when using this method. This is now scheduled to be removed before 1.0 - :param write_options: optional extra keyword arguments used if we + :param write_concern: optional extra keyword arguments used if we have to create a new document. - Passes any write_options onto :meth:`~mongoengine.Document.save` + Passes any write_concern onto :meth:`~mongoengine.Document.save` :param auto_save: if the object is to be saved automatically if not found. @@ -266,7 +266,7 @@ doc = self._document(**query) if auto_save: - doc.save(write_options=write_options) + doc.save(write_concern=write_concern) return doc, True def first(self): @@ -279,18 +279,13 @@ result = None return result - def insert(self, doc_or_docs, load_bulk=True, safe=False, - write_options=None): + def insert(self, doc_or_docs, load_bulk=True, write_concern=None): """bulk insert documents - If ``safe=True`` and the operation is unsuccessful, an - :class:`~mongoengine.OperationError` will be raised. - :param doc_or_docs: a document or list of documents to be inserted :param load_bulk (optional): If True returns the list of document instances - :param safe: check if the operation succeeded before returning - :param write_options: Extra keyword arguments are passed down to + :param write_concern: Extra keyword arguments are passed down to :meth:`~pymongo.collection.Collection.insert` which will be used as options for the resultant ``getLastError`` command.
For example, @@ -305,9 +300,8 @@ """ Document = _import_class('Document') - if not write_options: - write_options = {} - write_options.update({'safe': safe}) + if not write_concern: + write_concern = {} docs = doc_or_docs return_one = False @@ -319,7 +313,7 @@ for doc in docs: if not isinstance(doc, self._document): msg = ("Some documents inserted aren't instances of %s" - % str(self._document)) + % str(self._document)) raise OperationError(msg) if doc.pk and not doc._created: msg = "Some documents have ObjectIds, use doc.update() instead" raise OperationError(msg) @@ -328,7 +322,7 @@ signals.pre_bulk_insert.send(self._document, documents=docs) try: - ids = self._collection.insert(raw, **write_options) + ids = self._collection.insert(raw, **write_concern) except pymongo.errors.OperationFailure, err: message = 'Could not save document (%s)' if re.match('^E1100[01] duplicate key', unicode(err)): @@ -340,7 +334,7 @@ if not load_bulk: signals.post_bulk_insert.send( - self._document, documents=docs, loaded=False) + self._document, documents=docs, loaded=False) return return_one and ids[0] or ids documents = self.in_bulk(ids) @@ -348,7 +342,7 @@ for obj_id in ids: results.append(documents.get(obj_id)) signals.post_bulk_insert.send( - self._document, documents=results, loaded=True) + self._document, documents=results, loaded=True) return return_one and results[0] or results def count(self): @@ -358,10 +352,15 @@ return 0 return self._cursor.count(with_limit_and_skip=True) - def delete(self, safe=False): + def delete(self, write_concern=None): """Delete the documents matched by the query. - :param safe: check if the operation succeeded before returning + :param write_concern: Extra keyword arguments are passed down which + will be used as options for the resultant + ``getLastError`` command. For example, + ``delete(write_concern={"w": 2, "fsync": True})`` will + wait until at least two servers have recorded the write and + will force an fsync on the primary server.
""" queryset = self.clone() doc = queryset._document @@ -370,11 +369,14 @@ class QuerySet(object): signals.pre_delete.has_receivers_for(self._document) or signals.post_delete.has_receivers_for(self._document)) + if not write_concern: + write_concern = {} + # Handle deletes where skips or limits have been applied or has a # delete signal if queryset._skip or queryset._limit or has_delete_signal: for doc in queryset: - doc.delete(safe=safe) + doc.delete(write_concern=write_concern) return delete_rules = doc._meta.get('delete_rules') or {} @@ -386,7 +388,7 @@ class QuerySet(object): if rule == DENY and document_cls.objects( **{field_name + '__in': self}).count() > 0: msg = ("Could not delete document (%s.%s refers to it)" - % (document_cls.__name__, field_name)) + % (document_cls.__name__, field_name)) raise OperationError(msg) for rule_entry in delete_rules: @@ -396,36 +398,38 @@ class QuerySet(object): ref_q = document_cls.objects(**{field_name + '__in': self}) ref_q_count = ref_q.count() if (doc != document_cls and ref_q_count > 0 - or (doc == document_cls and ref_q_count > 0)): - ref_q.delete(safe=safe) + or (doc == document_cls and ref_q_count > 0)): + ref_q.delete(write_concern=write_concern) elif rule == NULLIFY: document_cls.objects(**{field_name + '__in': self}).update( - safe_update=safe, - **{'unset__%s' % field_name: 1}) + write_concern=write_concern, **{'unset__%s' % field_name: 1}) elif rule == PULL: document_cls.objects(**{field_name + '__in': self}).update( - safe_update=safe, - **{'pull_all__%s' % field_name: self}) + write_concern=write_concern, + **{'pull_all__%s' % field_name: self}) - queryset._collection.remove(queryset._query, safe=safe) + queryset._collection.remove(queryset._query, write_concern=write_concern) - def update(self, safe_update=True, upsert=False, multi=True, - write_options=None, **update): - """Perform an atomic update on the fields matched by the query. When - ``safe_update`` is used, the number of affected documents is returned. + def update(self, upsert=False, multi=True, write_concern=None, **update): + """Perform an atomic update on the fields matched by the query. - :param safe_update: check if the operation succeeded before returning :param upsert: Any existing document with that "_id" is overwritten. - :param write_options: extra keyword arguments for - :meth:`~pymongo.collection.Collection.update` + :param multi: Update multiple documents. + :param write_concern: Extra keyword arguments are passed down which + will be used as options for the resultant + ``getLastError`` command. For example, + ``save(..., write_concern={w: 2, fsync: True}, ...)`` will + wait until at least two servers have recorded the write and + will force an fsync on the primary server. + :param update: Django-style update keyword arguments .. 
versionadded:: 0.2 """ if not update: raise OperationError("No update parameters, would remove data") - if not write_options: - write_options = {} + if not write_concern: + write_concern = {} queryset = self.clone() query = queryset._query @@ -441,8 +445,7 @@ try: ret = queryset._collection.update(query, update, multi=multi, - upsert=upsert, safe=safe_update, - **write_options) + upsert=upsert, **write_concern) if ret is not None and 'n' in ret: return ret['n'] except pymongo.errors.OperationFailure, err: @@ -451,21 +454,21 @@ raise OperationError(message) raise OperationError(u'Update failed (%s)' % unicode(err)) - def update_one(self, safe_update=True, upsert=False, write_options=None, - **update): - """Perform an atomic update on first field matched by the query. When - ``safe_update`` is used, the number of affected documents is returned. + def update_one(self, upsert=False, write_concern=None, **update): + """Perform an atomic update on the first document matched by the query. - :param safe_update: check if the operation succeeded before returning :param upsert: insert if document doesn't exist (default ``False``) - :param write_options: extra keyword arguments for - :meth:`~pymongo.collection.Collection.update` + :param write_concern: Extra keyword arguments are passed down which + will be used as options for the resultant + ``getLastError`` command. For example, + ``update_one(..., write_concern={"w": 2, "fsync": True})`` will + wait until at least two servers have recorded the write and + will force an fsync on the primary server. :param update: Django-style update keyword arguments .. versionadded:: 0.2 """ - return self.update(safe_update=True, upsert=upsert, multi=False, - write_options=None, **update) + return self.update(upsert=upsert, multi=False, write_concern=write_concern, **update) def with_id(self, object_id): """Retrieve the object matching the id provided.
Uses `object_id` only @@ -498,7 +501,7 @@ class QuerySet(object): if self._scalar: for doc in docs: doc_map[doc['_id']] = self._get_scalar( - self._document._from_son(doc)) + self._document._from_son(doc)) elif self._as_pymongo: for doc in docs: doc_map[doc['_id']] = self._get_as_pymongo(doc) @@ -523,10 +526,10 @@ class QuerySet(object): c = self.__class__(self._document, self._collection_obj) copy_props = ('_mongo_query', '_initial_query', '_none', '_query_obj', - '_where_clause', '_loaded_fields', '_ordering', '_snapshot', - '_timeout', '_class_check', '_slave_okay', '_read_preference', - '_iter', '_scalar', '_as_pymongo', '_as_pymongo_coerce', - '_limit', '_skip', '_hint', '_auto_dereference') + '_where_clause', '_loaded_fields', '_ordering', '_snapshot', + '_timeout', '_class_check', '_slave_okay', '_read_preference', + '_iter', '_scalar', '_as_pymongo', '_as_pymongo_coerce', + '_limit', '_skip', '_hint', '_auto_dereference') for prop in copy_props: val = getattr(self, prop) diff --git a/tests/document/indexes.py b/tests/document/indexes.py index ff08ef1a..fea63a51 100644 --- a/tests/document/indexes.py +++ b/tests/document/indexes.py @@ -314,19 +314,27 @@ class IndexesTest(unittest.TestCase): """ class User(Document): meta = { + 'allow_inheritance': True, 'indexes': ['user_guid'], 'auto_create_index': False } user_guid = StringField(required=True) + class MongoUser(User): + pass + User.drop_collection() - u = User(user_guid='123') - u.save() + User(user_guid='123').save() + MongoUser(user_guid='123').save() - self.assertEqual(1, User.objects.count()) + self.assertEqual(2, User.objects.count()) info = User.objects._collection.index_information() self.assertEqual(info.keys(), ['_id_']) + + User.ensure_indexes() + info = User.objects._collection.index_information() + self.assertEqual(info.keys(), ['_cls_1_user_guid_1', '_id_']) User.drop_collection() def test_embedded_document_index(self): diff --git a/tests/queryset/queryset.py b/tests/queryset/queryset.py index 37670b0a..42e98ae7 100644 --- a/tests/queryset/queryset.py +++ b/tests/queryset/queryset.py @@ -278,24 +278,24 @@ class QuerySetTest(unittest.TestCase): query = query.filter(boolfield=True) self.assertEquals(query.count(), 1) - def test_update_write_options(self): - """Test that passing write_options works""" + def test_update_write_concern(self): + """Test that passing write_concern works""" self.Person.drop_collection() - write_options = {"fsync": True} + write_concern = {"fsync": True} author, created = self.Person.objects.get_or_create( - name='Test User', write_options=write_options) - author.save(write_options=write_options) + name='Test User', write_concern=write_concern) + author.save(write_concern=write_concern) self.Person.objects.update(set__name='Ross', - write_options=write_options) + write_concern=write_concern) author = self.Person.objects.first() self.assertEqual(author.name, 'Ross') - self.Person.objects.update_one(set__name='Test User', write_options=write_options) + self.Person.objects.update_one(set__name='Test User', write_concern=write_concern) author = self.Person.objects.first() self.assertEqual(author.name, 'Test User') @@ -592,10 +592,17 @@ class QuerySetTest(unittest.TestCase): blogs.append(Blog(title="post %s" % i, posts=[post1, post2])) Blog.objects.insert(blogs, load_bulk=False) - self.assertEqual(q, 1) # 1 for the insert + self.assertEqual(q, 1) # 1 for the insert + + Blog.drop_collection() + with query_counter() as q: + self.assertEqual(q, 0) + + Blog.ensure_indexes() + self.assertEqual(q, 1) 
Blog.objects.insert(blogs) - self.assertEqual(q, 3) # 1 for insert, and 1 for in bulk fetch (3 in total) + self.assertEqual(q, 3)  # 1 for the insert and 1 for the in_bulk fetch (3 in total) Blog.drop_collection() @@ -619,7 +626,7 @@ class QuerySetTest(unittest.TestCase): self.assertRaises(OperationError, throw_operation_error) # Test can insert new doc - new_post = Blog(title="code", id=ObjectId()) + new_post = Blog(title="code123", id=ObjectId()) Blog.objects.insert(new_post) # test handles other classes being inserted @@ -655,13 +662,13 @@ class QuerySetTest(unittest.TestCase): Blog.objects.insert([blog1, blog2]) def throw_operation_error_not_unique(): - Blog.objects.insert([blog2, blog3], safe=True) + Blog.objects.insert([blog2, blog3]) self.assertRaises(NotUniqueError, throw_operation_error_not_unique) self.assertEqual(Blog.objects.count(), 2) - Blog.objects.insert([blog2, blog3], write_options={ - 'continue_on_error': True}) + Blog.objects.insert([blog2, blog3], write_concern={"w": 0, + 'continue_on_error': True}) self.assertEqual(Blog.objects.count(), 3) def test_get_changed_fields_query_count(self): diff --git a/tests/test_connection.py b/tests/test_connection.py index 5b9743d6..4b8a3d11 100644 --- a/tests/test_connection.py +++ b/tests/test_connection.py @@ -10,7 +10,6 @@ from bson.tz_util import utc from mongoengine import * import mongoengine.connection from mongoengine.connection import get_db, get_connection, ConnectionError -from mongoengine.context_managers import switch_db class ConnectionTest(unittest.TestCase): @@ -26,7 +25,7 @@ class ConnectionTest(unittest.TestCase): connect('mongoenginetest') conn = get_connection() - self.assertTrue(isinstance(conn, pymongo.connection.Connection)) + self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient)) db = get_db() self.assertTrue(isinstance(db, pymongo.database.Database)) @@ -34,7 +33,7 @@ class ConnectionTest(unittest.TestCase): connect('mongoenginetest2', alias='testdb') conn = get_connection('testdb') - self.assertTrue(isinstance(conn, pymongo.connection.Connection)) + self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient)) def test_connect_uri(self): """Ensure that the connect() method works properly with uri's @@ -52,7 +51,7 @@ class ConnectionTest(unittest.TestCase): connect("testdb_uri", host='mongodb://username:password@localhost/mongoenginetest') conn = get_connection() - self.assertTrue(isinstance(conn, pymongo.connection.Connection)) + self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient)) db = get_db() self.assertTrue(isinstance(db, pymongo.database.Database)) @@ -65,7 +64,7 @@ class ConnectionTest(unittest.TestCase): self.assertRaises(ConnectionError, get_connection) conn = get_connection('testdb') - self.assertTrue(isinstance(conn, pymongo.connection.Connection)) + self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient)) db = get_db('testdb') self.assertTrue(isinstance(db, pymongo.database.Database))
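Taken together, the call-site migration this patch makes can be summarised by the following sketch (the ``Animal`` document and the chosen concern values are hypothetical): ::

    # Old code (Connection era):
    animal.save(safe=True, write_options={'w': 2})
    animal.delete(safe=True)
    Animal.objects.update(safe_update=True, set__name='Rex')

    # New code (MongoClient era):
    animal.save(write_concern={'w': 2})
    animal.delete(w=2)  # Document.delete collects **write_concern kwargs
    Animal.objects.update(write_concern={'w': 2}, set__name='Rex')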