This is an automated email from the ASF dual-hosted git repository.

dill0wn pushed a commit to branch dw/8565
in repository https://gitbox.apache.org/repos/asf/allura.git
commit 4e52dafd9e851089c1004b41d96f0eb546e0bb23
Author: Dillon Walls <dillon.wa...@slashdotmedia.com>
AuthorDate: Tue Jun 18 18:21:38 2024 -0400

    [#8565] updated to fully support pymongo 4 and Ming 0.14.0
---
 Allura/allura/command/show_models.py         |  8 ++--
 Allura/allura/lib/custom_middleware.py       | 18 ++++----
 Allura/allura/lib/helpers.py                 |  2 +-
 Allura/allura/model/auth.py                  |  6 +--
 Allura/allura/model/monq_model.py            |  6 +--
 Allura/allura/model/notification.py          | 13 +++---
 Allura/allura/model/repository.py            |  6 +--
 Allura/allura/tests/model/test_filesystem.py | 66 ++++++++++++++--------------
 Allura/allura/tests/test_commands.py         | 20 ++++-----
 Allura/allura/websetup/bootstrap.py          | 10 ++---
 ForgeImporters/forgeimporters/base.py        | 12 ++---
 ForgeSVN/forgesvn/model/svn.py               |  4 +-
 ForgeTracker/forgetracker/model/ticket.py    |  6 +--
 pytest.ini                                   |  7 +--
 run_tests                                    | 29 +++++++++---
 15 files changed, 112 insertions(+), 101 deletions(-)

diff --git a/Allura/allura/command/show_models.py b/Allura/allura/command/show_models.py
index b0b5fad19..7bffebffa 100644
--- a/Allura/allura/command/show_models.py
+++ b/Allura/allura/command/show_models.py
@@ -297,14 +297,14 @@ class EnsureIndexCommand(base.Command):
                     # _id is always non-sparse and unique anyway
                     del index_options['sparse']
                     del index_options['unique']
-                collection.ensure_index(idx.index_spec, **index_options)
+                collection.create_index(idx.index_spec, **index_options)
                 break
             except DuplicateKeyError as err:
                 base.log.info('Found dupe key(%s), eliminating dupes', err)
                 self._remove_dupes(collection, idx.index_spec)
         for keys, idx in indexes.items():
             base.log.info('...... ensure %s:%s', collection.name, idx)
-            collection.ensure_index(idx.index_spec, background=True, **idx.index_options)
+            collection.create_index(idx.index_spec, background=True, **idx.index_options)
         # Drop obsolete indexes
         for iname, keys in prev_indexes.items():
             if keys not in indexes:
@@ -329,12 +329,12 @@ class EnsureIndexCommand(base.Command):
         superset_keys = keys + [('temporary_extra_field_for_indexing', 1)]
         base.log.info('...... ensure index %s:%s',
                       collection.name, superset_keys)
-        superset_index = collection.ensure_index(superset_keys)
+        superset_index = collection.create_index(superset_keys)
         base.log.info('...... drop index %s:%s', collection.name, iname)
         collection.drop_index(iname)
         base.log.info('...... ensure index %s:%s %s',
                       collection.name, keys, creation_options)
-        collection.ensure_index(keys, **creation_options)
+        collection.create_index(keys, **creation_options)
         base.log.info('...... drop index %s:%s', collection.name, superset_index)
         collection.drop_index(superset_index)
diff --git a/Allura/allura/lib/custom_middleware.py b/Allura/allura/lib/custom_middleware.py
index 4b2e5e1f5..f73036d16 100644
--- a/Allura/allura/lib/custom_middleware.py
+++ b/Allura/allura/lib/custom_middleware.py
@@ -326,20 +326,22 @@ class AlluraTimerMiddleware(TimerMiddleware):
                   debug_each_call=False),
             Timer('ming', ming.odm.odmsession.ODMSession,
                   'insert_now', 'update_now', 'delete_now',
-                  'find', 'find_and_modify', 'remove', 'update', 'update_if_not_modified',
-                  'aggregate', 'group', 'map_reduce', 'inline_map_reduce', 'distinct',
+                  'find', 'find_one_and_update', 'find_one_and_replace', 'find_one_and_delete',
+                  'remove', 'update', 'update_if_not_modified',
+                  'aggregate', 'distinct',
                   ),
             # Timer('ming', ming.schema.Document, 'validate',
             #       debug_each_call=False),
             # Timer('ming', ming.schema.FancySchemaItem, '_validate_required',
             #       '_validate_fast_missing', '_validate_optional',
             #       debug_each_call=False),
-            Timer('mongo', pymongo.collection.Collection, 'count', 'find',
-                  'find_one', 'aggregate', 'group', 'map_reduce',
-                  'inline_map_reduce', 'find_and_modify',
-                  'insert', 'save', 'update', 'remove', 'drop'),
-            Timer('mongo', pymongo.cursor.Cursor, 'count', 'distinct',
-                  '_refresh'),
+            Timer('mongo', pymongo.collection.Collection, 'find', 'find_one', 'drop',
+                  'update_one', 'update_many', 'insert_one', 'insert_many', 'replace_one',
+                  'delete_one', 'delete_many', 'count_documents', 'estimated_document_count',
+                  'find_one_and_update', 'find_one_and_replace', 'find_one_and_delete'),
+            Timer('mongo', pymongo.cursor.Cursor, 'distinct',
+                  'clone', 'rewind', 'close', 'limit', 'batch_size', 'skip', 'max', 'min',
+                  'sort', 'explain', 'where', 'collation', 'comment', 'hint', '_refresh'),
             # urlopen and socket io may or may not overlap partially
             Timer('repo.Blob.{method_name}', allura.model.repository.Blob, '*'),
             Timer('repo.Commit.{method_name}', allura.model.repository.Commit, '*'),
diff --git a/Allura/allura/lib/helpers.py b/Allura/allura/lib/helpers.py
index c6fc9fbae..4ac03e2ae 100644
--- a/Allura/allura/lib/helpers.py
+++ b/Allura/allura/lib/helpers.py
@@ -51,8 +51,8 @@ import pkg_resources
 from formencode.validators import FancyValidator
 from dateutil.parser import parse
 from bson import ObjectId
+from bson.errors import InvalidId
 from paste.deploy import appconfig
-from pymongo.errors import InvalidId
 from contextlib import contextmanager
 from tg import tmpl_context as c, app_globals as g
 from tg import response, request
diff --git a/Allura/allura/model/auth.py b/Allura/allura/model/auth.py
index 0b73f1b34..17818c829 100644
--- a/Allura/allura/model/auth.py
+++ b/Allura/allura/model/auth.py
@@ -217,9 +217,9 @@ class AuthGlobals(MappedClass):
     @classmethod
     def get_next_uid(cls):
         cls.upsert()
-        g = cls.query.find_and_modify(
-            query={}, update={'$inc': {'next_uid': 1}},
-            new=True)
+        g = cls.query.find_one_and_update(
+            {}, update={'$inc': {'next_uid': 1}},
+            return_document=True)
         return g.next_uid
diff --git a/Allura/allura/model/monq_model.py b/Allura/allura/model/monq_model.py
index 61ea3d863..4dbc532dc 100644
--- a/Allura/allura/model/monq_model.py
+++ b/Allura/allura/model/monq_model.py
@@ -199,14 +199,14 @@ class MonQTask(MappedClass):
             query['time_queue'] = {'$lte': datetime.utcnow()}
         if only:
             query['task_name'] = {'$in': only}
-        obj = cls.query.find_and_modify(
-            query=query,
+        obj = cls.query.find_one_and_update(
+            query,
             update={
                 '$set': dict(
                     state='busy',
                     process=process)
             },
-            new=True,
+            return_document=True,
             sort=cls.sort)
         if obj is not None:
             return obj
diff --git a/Allura/allura/model/notification.py b/Allura/allura/model/notification.py
index 2bd205d35..5b08cf1e8 100644
--- a/Allura/allura/model/notification.py
+++ b/Allura/allura/model/notification.py
@@ -592,13 +592,12 @@ class Mailbox(MappedClass):
             next_scheduled={'$lt': now})
 
         def find_and_modify_direct_mbox():
-            return cls.query.find_and_modify(
-                query=q_direct,
+            return cls.query.find_one_and_update(
+                q_direct,
                 update={'$set': dict(
                     queue=[],
                     queue_empty=True,
-                )},
-                new=False)
+                )})
 
         for mbox in take_while_true(find_and_modify_direct_mbox):
             try:
@@ -618,14 +617,14 @@ class Mailbox(MappedClass):
                 next_scheduled += timedelta(days=7 * mbox.frequency.n)
             elif mbox.frequency.unit == 'month':
                 next_scheduled += timedelta(days=30 * mbox.frequency.n)
-            mbox = cls.query.find_and_modify(
-                query=dict(_id=mbox._id),
+            mbox = cls.query.find_one_and_update(
+                dict(_id=mbox._id),
                 update={'$set': dict(
                     next_scheduled=next_scheduled,
                     queue=[],
                     queue_empty=True,
                 )},
-                new=False)
+            )
             mbox.fire(now)
 
     def fire(self, now):
diff --git a/Allura/allura/model/repository.py b/Allura/allura/model/repository.py
index c947ec1b3..e51cdcd60 100644
--- a/Allura/allura/model/repository.py
+++ b/Allura/allura/model/repository.py
@@ -1072,10 +1072,10 @@ class CommitStatus(MappedClass):
 
     @classmethod
     def upsert(cls, **kw):
-        obj = cls.query.find_and_modify(
-            query=dict(commit_id=kw.get('commit_id'), context=kw.get('context')),
+        obj = cls.query.find_one_and_update(
+            dict(commit_id=kw.get('commit_id'), context=kw.get('context')),
             update={'$set': kw},
-            new=True,
+            return_document=True,
             upsert=True,
         )
         return obj
diff --git a/Allura/allura/tests/model/test_filesystem.py b/Allura/allura/tests/model/test_filesystem.py
index 7c2c9520a..3b2e54db1 100644
--- a/Allura/allura/tests/model/test_filesystem.py
+++ b/Allura/allura/tests/model/test_filesystem.py
@@ -49,16 +49,16 @@ class TestFile(TestCase):
         self.conn = M.session.main_doc_session.db._connection
         self.db = M.session.main_doc_session.db
 
-        self.db.fs.remove()
-        self.db.fs.files.remove()
-        self.db.fs.chunks.remove()
+        self.db.fs.drop()
+        self.db.fs.files.drop()
+        self.db.fs.chunks.drop()
 
     def test_from_stream(self):
         f = File.from_stream('test1.txt', BytesIO(b'test1'))
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() == 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) == 1
         assert f.filename == 'test1.txt'
         assert f.content_type == 'text/plain'
         self._assert_content(f, b'test1')
@@ -66,9 +66,9 @@
     def test_from_data(self):
         f = File.from_data('test2.txt', b'test2')
         self.session.flush(f)
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() == 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) == 1
         assert f.filename == 'test2.txt'
         assert f.content_type == 'text/plain'
         self._assert_content(f, b'test2')
@@ -77,50 +77,50 @@
         path = __file__.rstrip('c')
         f = File.from_path(path)
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() >= 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) >= 1
         assert f.filename == os.path.basename(path)
         text = f.rfile().read()
 
     def test_delete(self):
         f = File.from_data('test1.txt', b'test1')
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() == 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) == 1
         f.delete()
         self.session.flush()
-        assert self.db.fs.count() == 0
-        assert self.db.fs.files.count() == 0
-        assert self.db.fs.chunks.count() == 0
+        assert self.db.fs.count_documents({}) == 0
+        assert self.db.fs.files.count_documents({}) == 0
+        assert self.db.fs.chunks.count_documents({}) == 0
 
     def test_remove(self):
         File.from_data('test1.txt', b'test1')
         File.from_data('test2.txt', b'test2')
         self.session.flush()
-        assert self.db.fs.count() == 2
-        assert self.db.fs.files.count() == 2
-        assert self.db.fs.chunks.count() == 2
+        assert self.db.fs.count_documents({}) == 2
+        assert self.db.fs.files.count_documents({}) == 2
+        assert self.db.fs.chunks.count_documents({}) == 2
         File.remove(dict(filename='test1.txt'))
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() == 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) == 1
 
     def test_overwrite(self):
         f = File.from_data('test1.txt', b'test1')
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 1
-        assert self.db.fs.chunks.count() == 1
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 1
+        assert self.db.fs.chunks.count_documents({}) == 1
         self._assert_content(f, b'test1')
         with f.wfile() as fp:
             fp.write(b'test2')
         self.session.flush()
-        assert self.db.fs.count() == 1
-        assert self.db.fs.files.count() == 2
-        assert self.db.fs.chunks.count() == 2
+        assert self.db.fs.count_documents({}) == 1
+        assert self.db.fs.files.count_documents({}) == 2
+        assert self.db.fs.chunks.count_documents({}) == 2
         self._assert_content(f, b'test2')
 
     def test_serve_embed(self):
@@ -167,9 +167,9 @@
         assert t.content_type == 'image/png'
         assert t.is_image()
         assert f.filename == t.filename
-        assert self.db.fs.count() == 2
-        assert self.db.fs.files.count() == 2
-        assert self.db.fs.chunks.count() == 2
+        assert self.db.fs.count_documents({}) == 2
+        assert self.db.fs.files.count_documents({}) == 2
+        assert self.db.fs.chunks.count_documents({}) == 2
 
     def test_not_image(self):
         f, t = File.save_image(
diff --git a/Allura/allura/tests/test_commands.py b/Allura/allura/tests/test_commands.py
index 660b0f172..068fdaf89 100644
--- a/Allura/allura/tests/test_commands.py
+++ b/Allura/allura/tests/test_commands.py
@@ -199,7 +199,7 @@ class TestEnsureIndexCommand:
         cmd = show_models.EnsureIndexCommand('ensure_index')
         cmd.options = Object(clean=False)
         cmd._update_indexes(collection, indexes)
-        assert collection.ensure_index.called
+        assert collection.create_index.called
         assert not collection.drop_index.called
 
     def test_update_indexes_order(self):
@@ -220,13 +220,13 @@
         for i, call_ in enumerate(collection.mock_calls):
             method_name = call_[0]
             collection_call_order[method_name] = i
-        assert collection_call_order['ensure_index'] < collection_call_order['drop_index'], collection.mock_calls
+        assert collection_call_order['create_index'] < collection_call_order['drop_index'], collection.mock_calls
 
     def test_update_indexes_unique_changes(self):
         collection = Mock(name='collection')
         # expecting these ensure_index calls, we'll make their return values normal
         # for easier assertions later
-        collection.ensure_index.side_effect = [
+        collection.create_index.side_effect = [
             '_foo_bar_temporary_extra_field_for_indexing',
             '_foo_bar',
             '_foo_baz_temporary_extra_field_for_indexing',
@@ -251,18 +251,16 @@
 
         assert collection.mock_calls == [
             call.index_information(),
-            call.ensure_index(
-                [('foo', 1), ('bar', 1), ('temporary_extra_field_for_indexing', 1)]),
+            call.create_index([('foo', 1), ('bar', 1), ('temporary_extra_field_for_indexing', 1)]),
             call.drop_index('_foo_bar'),
-            call.ensure_index([('foo', 1), ('bar', 1)], unique=False),
+            call.create_index([('foo', 1), ('bar', 1)], unique=False),
             call.drop_index('_foo_bar_temporary_extra_field_for_indexing'),
-            call.ensure_index(
-                [('foo', 1), ('baz', 1), ('temporary_extra_field_for_indexing', 1)]),
+            call.create_index([('foo', 1), ('baz', 1), ('temporary_extra_field_for_indexing', 1)]),
             call.drop_index('_foo_baz'),
-            call.ensure_index([('foo', 1), ('baz', 1)], unique=True),
+            call.create_index([('foo', 1), ('baz', 1)], unique=True),
             call.drop_index('_foo_baz_temporary_extra_field_for_indexing'),
-            call.ensure_index([('foo', 1), ('baz', 1)], unique=True, sparse=False),
-            call.ensure_index([('foo', 1), ('bar', 1)], unique=False, sparse=False, background=True)
+            call.create_index([('foo', 1), ('baz', 1)], unique=True, sparse=False),
+            call.create_index([('foo', 1), ('bar', 1)], unique=False, sparse=False, background=True)
         ]
diff --git a/Allura/allura/websetup/bootstrap.py b/Allura/allura/websetup/bootstrap.py
index 21cfa6781..78a602d54 100644
--- a/Allura/allura/websetup/bootstrap.py
+++ b/Allura/allura/websetup/bootstrap.py
@@ -279,15 +279,15 @@ def wipe_database():
     conn = M.main_doc_session.bind.conn
     if isinstance(conn, mim.Connection):
         clear_all_database_tables()
-        for db in conn.database_names():
+        for db in conn.list_database_names():
             db = conn[db]
     else:
-        for database in conn.database_names():
+        for database in conn.list_database_names():
             if database not in ('allura', 'pyforge', 'project-data'):
                 continue
             log.info('Wiping database %s', database)
             db = conn[database]
-            for coll in list(db.collection_names()):
+            for coll in list(db.list_collection_names()):
                 if coll.startswith('system.'):
                     continue
                 log.info('Dropping collection %s:%s', database, coll)
@@ -299,9 +299,9 @@ def wipe_database():
 
 def clear_all_database_tables():
     conn = M.main_doc_session.bind.conn
-    for db in conn.database_names():
+    for db in conn.list_database_names():
         db = conn[db]
-        for coll in list(db.collection_names()):
+        for coll in list(db.list_collection_names()):
             if coll == 'system.indexes':
                 continue
             db.drop_collection(coll)
diff --git a/ForgeImporters/forgeimporters/base.py b/ForgeImporters/forgeimporters/base.py
index 1087bdc40..db14405df 100644
--- a/ForgeImporters/forgeimporters/base.py
+++ b/ForgeImporters/forgeimporters/base.py
@@ -466,8 +466,8 @@ class ToolImporter(metaclass=ToolImporterMeta):
         """
         limit = config.get('tool_import.rate_limit', 1)
         pending_key = 'tool_data.%s.pending' % self.classname
-        modified_project = M.Project.query.find_and_modify(
-            query={
+        modified_project = M.Project.query.find_one_and_update(
+            {
                 '_id': project._id,
                 '$or': [
                     {pending_key: None},
                 ],
             },
             update={'$inc': {pending_key: 1}},
-            new=True,
+            return_document=True,
         )
         return modified_project is not None
@@ -485,10 +485,10 @@ class ToolImporter(metaclass=ToolImporterMeta):
         to indicate that an import is complete.
         """
         pending_key = 'tool_data.%s.pending' % self.classname
-        M.Project.query.find_and_modify(
-            query={'_id': project._id},
+        M.Project.query.find_one_and_update(
+            {'_id': project._id},
             update={'$inc': {pending_key: -1}},
-            new=True,
+            return_document=True,
         )
 
     def import_tool(self, project, user, project_name=None,
diff --git a/ForgeSVN/forgesvn/model/svn.py b/ForgeSVN/forgesvn/model/svn.py
index e85624d8a..6d210f128 100644
--- a/ForgeSVN/forgesvn/model/svn.py
+++ b/ForgeSVN/forgesvn/model/svn.py
@@ -450,8 +450,8 @@ class SVNImplementation(M.RepositoryImplementation):
             path = tree_path.strip('/')
             RM.LastCommitDoc.m.update_partial(
                 {'commit_id': commit_id, 'path': path},
-                {'commit_id': commit_id, 'path':
-                    path, 'entries': lcd_entries},
+                {'$set': {'commit_id': commit_id, 'path':
+                    path, 'entries': lcd_entries}},
                 upsert=True)
         return tree_id
diff --git a/ForgeTracker/forgetracker/model/ticket.py b/ForgeTracker/forgetracker/model/ticket.py
index de099a372..1b80511d8 100644
--- a/ForgeTracker/forgetracker/model/ticket.py
+++ b/ForgeTracker/forgetracker/model/ticket.py
@@ -139,10 +139,10 @@ class Globals(MappedClass):
         })
 
     def next_ticket_num(self):
-        gbl = Globals.query.find_and_modify(
-            query=dict(app_config_id=self.app_config_id),
+        gbl = Globals.query.find_one_and_update(
+            dict(app_config_id=self.app_config_id),
             update={'$inc': {'last_ticket_num': 1}},
-            new=True)
+            return_document=True)
         session(gbl).expunge(gbl)
         return gbl.last_ticket_num
diff --git a/pytest.ini b/pytest.ini
index ffc5fc09b..e6e007360 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -22,12 +22,7 @@ filterwarnings =
     # https://github.com/html5lib/html5lib-python/issues/443
     ignore::DeprecationWarning:html5lib.filters.sanitizer
 
-    # https://github.com/TurboGears/Ming/issues/48
-    ignore::DeprecationWarning:ming
-    # not sure why timermiddleware is surfacing pymongo warnings, but it does:
-    ignore:insert is deprecated. Use insert_one or insert_many instead.:DeprecationWarning:timermiddleware
-    ignore:update is deprecated. Use replace_one, update_one or update_many instead.:DeprecationWarning:timermiddleware
-    ignore:remove is deprecated. Use delete_one or delete_many instead.:DeprecationWarning:timermiddleware
+
     # don't let us regress on this:
     error:tmpl_context.form_values:DeprecationWarning:tg.wsgiapp
diff --git a/run_tests b/run_tests
index 28d2e0b3f..99aa681a9 100755
--- a/run_tests
+++ b/run_tests
@@ -18,7 +18,9 @@
 # under the License.
 
 import argparse
+from collections import defaultdict
 from glob import glob
+import json
 import multiprocessing
 from multiprocessing.pool import ThreadPool
 import subprocess
@@ -125,13 +127,18 @@ def run_tests_in_parallel(options, runner_args):
     def get_pkg_path(pkg):
         return ALT_PKG_PATHS.get(pkg, '')
+
+    def get_concurrent_tests(pkg):
+        if pkg in NOT_MULTIPROC_SAFE:
+            return 1
+        return options.concurrent_tests
 
     def get_multiproc_args(pkg):
-        if options.concurrent_tests == 1:
-            return ''
-        return '-n {procs_per_suite} --dist loadfile'.format(
-            procs_per_suite=options.concurrent_tests
-        ) if pkg not in NOT_MULTIPROC_SAFE else ''
+        if get_concurrent_tests(pkg) > 1:
+            return '-n {procs_per_suite} --dist loadfile'.format(
+                procs_per_suite=options.concurrent_tests
+            )
+        return ''
 
     def get_concurrent_suites():
         if '-n' in sys.argv:
@@ -140,6 +147,7 @@ def run_tests_in_parallel(options, runner_args):
 
     cmds = []
     env = dict(os.environ)
+    concurrent_tests_map = dict()
     for package in check_packages(options.packages):
         runner = 'pytest'
         if options.coverage:
@@ -163,6 +171,8 @@ def run_tests_in_parallel(options, runner_args):
             args=' '.join(default_args + runner_args),
             multiproc_args=multiproc_args,
         )
+        num_concurrent_tests = get_concurrent_tests(package)
+        concurrent_tests_map[package] = num_concurrent_tests if num_concurrent_tests > 1 else '.'
         if options.coverage:
             cmd += ' && coverage combine'  # merge separate files present from multiprocessing config being on
             cmd += ' && coverage report --include=./* --omit="*/tests/*"'
@@ -171,7 +181,14 @@ def run_tests_in_parallel(options, runner_args):
     # TODO: add a way to include this or not; and add xml output for Jenkins
     cmds.append(('npm run lint-es6', {}))
 
-    ret_codes = run_many(cmds, processes=get_concurrent_suites())
+    num_concurrent_suites = get_concurrent_suites()
+
+    print('Running tests in parallel...')
+    print('\t{:>20}: {}'.format('# CPUS', CPUS))
+    print('\t{:>20}: {}'.format('# Suites in Parallel', num_concurrent_suites))
+    print('\t{:>20}: {}'.format('# Test in Parallel Per Suite', json.dumps(concurrent_tests_map, indent=2)))
+
+    ret_codes = run_many(cmds, processes=num_concurrent_suites)
 
     if options.coverage and not any(ret_codes) and len(options.packages) > 1:
         subprocess.call('rm .coverage', shell=True)
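
For reference, the pymongo 3 -> 4 replacements applied throughout this commit, collected in one short runnable sketch. This is not part of the commit; the `allura_sandbox` database and `tickets` collection are hypothetical stand-ins, and it assumes a running local MongoDB with pymongo >= 4 installed:

    # A sketch only -- not part of this commit. Names here are hypothetical;
    # only the method migrations mirror the diff above.
    from bson import ObjectId
    from bson.errors import InvalidId  # moved from pymongo.errors (see helpers.py)
    from pymongo import MongoClient, ReturnDocument

    client = MongoClient('mongodb://localhost:27017/')
    db = client['allura_sandbox']
    tickets = db['tickets']

    # ensure_index() is gone in pymongo 4; create_index() replaces it
    # (show_models.py, test_commands.py):
    tickets.create_index([('app_config_id', 1), ('ticket_num', 1)], unique=True)

    # Collection.count()/Cursor.count() are gone; count_documents() takes a
    # filter document (test_filesystem.py):
    print(tickets.count_documents({'status': 'open'}))

    # find_and_modify(query=..., new=True) becomes find_one_and_update() with a
    # positional filter and return_document (auth.py, monq_model.py,
    # notification.py, repository.py, ticket.py, forgeimporters/base.py):
    counter = tickets.find_one_and_update(
        {'app_config_id': 1},
        {'$inc': {'last_ticket_num': 1}},
        upsert=True,
        return_document=ReturnDocument.AFTER,
    )

    # insert()/update()/remove() split into explicit *_one/*_many methods
    # (these are the names the new Timer list in custom_middleware.py tracks):
    tickets.insert_one({'ticket_num': counter['last_ticket_num'], 'status': 'open'})
    tickets.update_many({'status': 'open'}, {'$set': {'status': 'closed'}})
    tickets.delete_many({'status': 'closed'})

    # database_names()/collection_names() became list_database_names()/
    # list_collection_names() (bootstrap.py):
    print(client.list_database_names())
    print(db.list_collection_names())

    # InvalidId now lives in bson.errors, not pymongo.errors:
    try:
        ObjectId('not-a-valid-objectid')
    except InvalidId:
        pass

Note that pymongo defines `ReturnDocument.BEFORE` as `False` and `ReturnDocument.AFTER` as `True`, which is why the commit can simply pass `return_document=True` through Ming's query layer wherever the old calls used `new=True`.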