Script 'mail_helper' called by obssrc
Hello community,

here is the log from the commit of package 389-ds for openSUSE:Factory checked 
in at 2025-10-08 18:13:42
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/389-ds (Old)
 and      /work/SRC/openSUSE:Factory/.389-ds.new.11973 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "389-ds"

Wed Oct  8 18:13:42 2025 rev:87 rq:1309715 version:3.1.3~git111.e953ee704

Changes:
--------
--- /work/SRC/openSUSE:Factory/389-ds/389-ds.changes    2025-10-05 
17:50:51.814623099 +0200
+++ /work/SRC/openSUSE:Factory/.389-ds.new.11973/389-ds.changes 2025-10-08 
18:14:44.010938011 +0200
@@ -1,0 +2,11 @@
+Wed Oct 08 01:25:10 UTC 2025 - [email protected]
+
+- Move librobdb.so from devel to main package
+- Update to version 3.1.3~git111.e953ee704:
+  * Issue 7023 - UI - if first instance that is loaded is stopped it breaks 
parts of the UI
+  * Issue 6753 - Removing ticket 47714 test and porting to DSLdapObject (#6946)
+  * Issue 7027 - 389-ds-base OpenScanHub Leaks Detected (#7028)
+  * Issue 6753 - Removing ticket 47676 test and porting to DSLdapObject (#6938)
+  * Issue 6966 - On large DB, unlimited IDL scan limit reduce the SRCH 
performance (#6967)
+
+-------------------------------------------------------------------

Old:
----
  389-ds-base-3.1.3~git106.bea5091e3.tar.zst

New:
----
  389-ds-base-3.1.3~git111.e953ee704.tar.zst

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ 389-ds.spec ++++++
--- /var/tmp/diff_new_pack.W8bOqE/_old  2025-10-08 18:14:45.098983671 +0200
+++ /var/tmp/diff_new_pack.W8bOqE/_new  2025-10-08 18:14:45.102983839 +0200
@@ -31,7 +31,7 @@
 %define svrcorelib libsvrcore0
 
 Name:           389-ds
-Version:        3.1.3~git106.bea5091e3
+Version:        3.1.3~git111.e953ee704
 Release:        0
 Summary:        389 Directory Server
 License:        GPL-3.0-or-later AND MPL-2.0
@@ -400,6 +400,7 @@
 %{_libdir}/dirsrv/librewriters.so
 %{_libdir}/dirsrv/plugins/*.so
 %{_libdir}/dirsrv/python/*.py
+%{_libdir}/dirsrv/librobdb.so
 %{_libdir}/dirsrv/*.so.*
 %exclude %{_mandir}/man1/ldap-agent*
 %{_mandir}/man1/*
@@ -440,7 +441,6 @@
 %{_libdir}/dirsrv/libslapd.so
 %{_libdir}/dirsrv/libns-dshttpd.so
 %{_libdir}/dirsrv/libldaputil.so
-%{_libdir}/dirsrv/librobdb.so
 %{_libdir}/pkgconfig/dirsrv.pc
 %{_libdir}/pkgconfig/svrcore.pc
 

++++++ 389-ds-base-3.1.3~git106.bea5091e3.tar.zst -> 
389-ds-base-3.1.3~git111.e953ee704.tar.zst ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/config/config_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/config/config_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/config/config_test.py
       2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/config/config_test.py
       2025-10-07 21:13:14.000000000 +0200
@@ -503,17 +503,19 @@
         topo.standalone.config.set('nsslapd-ndn-cache-max-size', 
'invalid_value')
 
 
-def test_require_index(topo):
+def test_require_index(topo, request):
     """Validate that unindexed searches are rejected
 
     :id: fb6e31f2-acc2-4e75-a195-5c356faeb803
     :setup: Standalone instance
     :steps:
         1. Set "nsslapd-require-index" to "on"
-        2. Test an unindexed search is rejected
+        2. Set ancestorid nsIndexIDListScanLimit to 100
+        3. Test an unindexed search is rejected
     :expectedresults:
         1. Success
         2. Success
+        3. Success
     """
 
     # Set the config
@@ -524,6 +526,10 @@
 
     db_cfg = DatabaseConfig(topo.standalone)
     db_cfg.set([('nsslapd-idlistscanlimit', '100')])
+    backend = Backends(topo.standalone).get_backend(DEFAULT_SUFFIX)
+    ancestorid_index = backend.get_index('ancestorid')
+    ancestorid_index.replace("nsIndexIDListScanLimit", ensure_bytes("limit=100 
type=eq flags=AND"))
+    topo.standalone.restart()
 
     users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
     for i in range(101):
@@ -534,10 +540,15 @@
     with pytest.raises(ldap.UNWILLING_TO_PERFORM):
         raw_objects.filter("(description=test*)")
 
+    def fin():
+        ancestorid_index.replace("nsIndexIDListScanLimit", 
ensure_bytes("limit=5000 type=eq flags=AND"))
+
+    request.addfinalizer(fin)
+
 
 
 @pytest.mark.skipif(ds_is_older('1.4.2'), reason="The config setting only 
exists in 1.4.2 and higher")
-def test_require_internal_index(topo):
+def test_require_internal_index(topo, request):
     """Ensure internal operations require indexed attributes
 
     :id: 22b94f30-59e3-4f27-89a1-c4f4be036f7f
@@ -568,6 +579,10 @@
     # Create a bunch of users
     db_cfg = DatabaseConfig(topo.standalone)
     db_cfg.set([('nsslapd-idlistscanlimit', '100')])
+    backend = Backends(topo.standalone).get_backend(DEFAULT_SUFFIX)
+    ancestorid_index = backend.get_index('ancestorid')
+    ancestorid_index.replace("nsIndexIDListScanLimit", ensure_bytes("limit=100 
type=eq flags=AND"))
+    topo.standalone.restart()
     users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
     for i in range(102, 202):
         users.create_test_user(uid=i)
@@ -592,6 +607,12 @@
     with pytest.raises(ldap.UNWILLING_TO_PERFORM):
         user.delete()
 
+    def fin():
+        ancestorid_index.replace("nsIndexIDListScanLimit", 
ensure_bytes("limit=5000 type=eq flags=AND"))
+
+    request.addfinalizer(fin)
+
+
 
 def get_pstack(pid):
     """Get a pstack of the pid."""
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/paged_results/paged_results_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/paged_results/paged_results_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/paged_results/paged_results_test.py
 2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/paged_results/paged_results_test.py
 2025-10-07 21:13:14.000000000 +0200
@@ -306,19 +306,19 @@
     del_users(users_list)
 
 
[email protected]("page_size,users_num,suffix,attr_name,attr_value,expected_err",
 [
[email protected]("page_size,users_num,suffix,attr_name,attr_value,expected_err,
 restart", [
     (50, 200, 'cn=config,%s' % DN_LDBM, 'nsslapd-idlistscanlimit', '100',
-     ldap.UNWILLING_TO_PERFORM),
+     ldap.UNWILLING_TO_PERFORM, True),
     (5, 15, DN_CONFIG, 'nsslapd-timelimit', '20',
-     ldap.UNAVAILABLE_CRITICAL_EXTENSION),
+     ldap.UNAVAILABLE_CRITICAL_EXTENSION, False),
     (21, 50, DN_CONFIG, 'nsslapd-sizelimit', '20',
-     ldap.SIZELIMIT_EXCEEDED),
+     ldap.SIZELIMIT_EXCEEDED, False),
     (21, 50, DN_CONFIG, 'nsslapd-pagedsizelimit', '5',
-     ldap.SIZELIMIT_EXCEEDED),
+     ldap.SIZELIMIT_EXCEEDED, False),
     (5, 50, 'cn=config,%s' % DN_LDBM, 'nsslapd-lookthroughlimit', '20',
-     ldap.ADMINLIMIT_EXCEEDED)])
+     ldap.ADMINLIMIT_EXCEEDED, False)])
 def test_search_limits_fail(topology_st, create_user, page_size, users_num,
-                            suffix, attr_name, attr_value, expected_err):
+                            suffix, attr_name, attr_value, expected_err, 
restart):
     """Verify that search with a simple paged results control
    throws expected exceptions when corresponding limits are
     exceeded.
@@ -341,6 +341,15 @@
 
     users_list = add_users(topology_st, users_num, DEFAULT_SUFFIX)
     attr_value_bck = change_conf_attr(topology_st, suffix, attr_name, 
attr_value)
+    ancestorid_index = None
+    if attr_name == 'nsslapd-idlistscanlimit':
+        backend = Backends(topology_st.standalone).get_backend(DEFAULT_SUFFIX)
+        ancestorid_index = backend.get_index('ancestorid')
+        ancestorid_index.replace("nsIndexIDListScanLimit", 
ensure_bytes("limit=100 type=eq flags=AND"))
+
+    if (restart):
+        log.info('Instance restarted')
+        topology_st.standalone.restart()
     conf_param_dict = {attr_name: attr_value}
     search_flt = r'(uid=test*)'
     searchreq_attrlist = ['dn', 'sn']
@@ -393,6 +402,8 @@
             else:
                 break
     finally:
+        if ancestorid_index:
+            ancestorid_index.replace("nsIndexIDListScanLimit", 
ensure_bytes("limit=5000 type=eq flags=AND"))
         del_users(users_list)
         change_conf_attr(topology_st, suffix, attr_name, attr_value_bck)
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/plugins/account_policy_login_attr_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/plugins/account_policy_login_attr_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/plugins/account_policy_login_attr_test.py
   1970-01-01 01:00:00.000000000 +0100
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/plugins/account_policy_login_attr_test.py
   2025-10-07 21:13:14.000000000 +0200
@@ -0,0 +1,197 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+import logging
+import os
+import time
+import pytest
+import ldap
+from lib389.topologies import topology_st
+from lib389.idm.user import UserAccounts
+from lib389.idm.directorymanager import DirectoryManager
+from lib389.plugins import AccountPolicyPlugin, AccountPolicyConfig
+from lib389._constants import DEFAULT_SUFFIX, DN_DM, PASSWORD, 
PLUGIN_ACCT_POLICY
+
+pytestmark = pytest.mark.tier2
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+ACCP_CONF = f"cn=config,cn={PLUGIN_ACCT_POLICY},cn=plugins,cn=config"
+ACCT_POLICY_DN = f'cn=Account Inactivation Policy,{DEFAULT_SUFFIX}'
+INACTIVITY_LIMIT = '3000'
+
[email protected](scope="function")
+def account_policy_setup(topology_st, request):
+    """Setup account policy plugin and test user"""
+    inst = topology_st.standalone
+
+    users = UserAccounts(inst, DEFAULT_SUFFIX)
+    policy_user = users.create(properties={
+        'uid': 'test_policy_user',
+        'cn': 'test_policy_user',
+        'sn': 'test_policy_user',
+        'givenname': 'test_policy_user',
+        'userPassword': PASSWORD,
+        'uidNumber': '5000',
+        'gidNumber': '5000',
+        'homeDirectory': '/home/test_policy_user',
+        'acctPolicySubentry': ACCT_POLICY_DN
+    })
+
+    policy_entry = AccountPolicyConfig(inst, dn=ACCT_POLICY_DN)
+    policy_entry.create(properties={
+        'cn': 'Account Inactivation Policy',
+        'accountInactivityLimit': INACTIVITY_LIMIT
+    })
+
+    plugin = AccountPolicyPlugin(inst)
+    plugin.enable()
+
+    def fin():
+        policy_user.delete()
+        policy_entry.delete()
+        plugin.disable()
+
+    request.addfinalizer(fin)
+    return inst, policy_user
+
+
+def test_account_policy_without_always_record_login_attr(account_policy_setup):
+    """Test account policy functionality without alwaysRecordLoginAttr
+
+    :id: e8f4a3b2-1c9d-4e7f-8a6b-5d2c9e1f0a4b
+    :setup: Standalone Instance with Account Policy plugin and test user
+    :steps:
+        1. Configure account policy without alwaysRecordLoginAttr
+        2. Bind as test user
+        3. Check lastLoginTime is updated
+        4. Bind as test user again
+        5. Verify lastLoginTime is updated to a newer value
+        6. Set very low inactivity limit
+        7. Try to bind again should fail with constraint violation
+    :expectedresults:
+        1. Configuration should succeed
+        2. Bind should succeed
+        3. lastLoginTime should be present
+        4. Second bind should succeed
+        5. lastLoginTime should be newer
+        6. Inactivity limit should be set
+        7. Bind should fail due to account inactivity
+    """
+    inst, policy_user = account_policy_setup
+
+    log.info("Configure account policy without alwaysRecordLoginAttr")
+    accp = AccountPolicyConfig(inst, dn=ACCP_CONF)
+    accp.replace_many(
+        ('alwaysrecordlogin', 'yes'),
+        ('stateattrname', 'lastLoginTime'),
+        ('altstateattrname', 'createTimestamp'),
+        ('specattrname', 'acctPolicySubentry'),
+        ('limitattrname', 'accountInactivityLimit')
+    )
+
+    inst.restart()
+
+    log.info("Bind as test user")
+    policy_user.bind(PASSWORD)
+    time.sleep(1)
+
+    log.info("Check lastLoginTime was added")
+    dm = DirectoryManager(inst)
+    dm.bind(PASSWORD)
+    first_login_time = policy_user.get_attr_val_utf8('lastLoginTime')
+    assert first_login_time
+
+    log.info("Bind as test user again")
+    policy_user.bind(PASSWORD)
+    time.sleep(1)
+
+    log.info("Verify lastLoginTime was updated")
+    dm = DirectoryManager(inst)
+    dm.bind(PASSWORD)
+    second_login_time = policy_user.get_attr_val_utf8('lastLoginTime')
+    assert first_login_time < second_login_time
+
+    log.info("Change inactivity limit to trigger account lockout")
+    policy_entry = AccountPolicyConfig(inst, dn=ACCT_POLICY_DN)
+    policy_entry.replace('accountInactivityLimit', '1')
+    time.sleep(1)
+
+    log.info("Bind should fail due to account inactivity")
+    with pytest.raises(ldap.CONSTRAINT_VIOLATION):
+        policy_user.bind(PASSWORD)
+
+    policy_entry.replace('accountInactivityLimit', INACTIVITY_LIMIT)
+
+
+def test_account_policy_with_always_record_login_attr(account_policy_setup):
+    """Test account policy with alwaysRecordLoginAttr functionality
+
+    :id: b7c2f9a1-8d6e-4c3b-9f5a-2e8d1c7a0b4f
+    :setup: Standalone Instance with Account Policy plugin and test user
+    :steps:
+        1. Configure account policy with alwaysRecordLoginAttr set to 
lastLoginTime
+        2. Set stateattrname to bogus value and altstateattrname to 
modifyTimestamp
+        3. Remove any existing lastLoginTime from user
+        4. Bind as test user
+        5. Check lastLoginTime was added via alwaysRecordLoginAttr
+        6. Bind as test user again
+        7. Verify lastLoginTime was updated to newer value
+    :expectedresults:
+        1. Configuration should succeed
+        2. Configuration should be updated
+        3. lastLoginTime should be removed
+        4. Bind should succeed
+        5. lastLoginTime should be present due to alwaysRecordLoginAttr
+        6. Second bind should succeed
+        7. lastLoginTime should be updated to newer value
+    """
+    inst, policy_user = account_policy_setup
+
+    log.info("Remove any existing lastLoginTime from user")
+    try:
+        policy_user.remove('lastLoginTime', None)
+    except ldap.NO_SUCH_ATTRIBUTE:
+        pass
+
+    log.info("Configure account policy with alwaysRecordLoginAttr")
+    accp = AccountPolicyConfig(inst, dn=ACCP_CONF)
+    accp.replace_many(
+        ('alwaysrecordlogin', 'yes'),
+        ('stateattrname', 'bogus'),
+        ('altstateattrname', 'modifyTimestamp'),
+        ('alwaysRecordLoginAttr', 'lastLoginTime'),
+        ('specattrname', 'acctPolicySubentry'),
+        ('limitattrname', 'accountInactivityLimit')
+    )
+
+    inst.restart()
+
+    log.info("Bind as test user")
+    policy_user.bind(PASSWORD)
+    time.sleep(1)
+
+    log.info("Check lastLoginTime was added via alwaysRecordLoginAttr")
+    dm = DirectoryManager(inst)
+    dm.bind(PASSWORD)
+    first_login_time = policy_user.get_attr_val_utf8('lastLoginTime')
+    assert first_login_time
+
+    log.info("Bind as test user again")
+    policy_user.bind(PASSWORD)
+    time.sleep(1)
+
+    log.info("Verify lastLoginTime was updated")
+    dm = DirectoryManager(inst)
+    dm.bind(PASSWORD)
+    second_login_time = policy_user.get_attr_val_utf8('lastLoginTime')
+    assert first_login_time < second_login_time
+
+
+if __name__ == '__main__':
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/schema/schema_update_policy_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/schema/schema_update_policy_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/suites/schema/schema_update_policy_test.py
 1970-01-01 01:00:00.000000000 +0100
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/suites/schema/schema_update_policy_test.py
 2025-10-07 21:13:14.000000000 +0200
@@ -0,0 +1,259 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2025 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import logging
+import os
+import ldap
+import time
+import pytest
+from lib389._constants import DEFAULT_SUFFIX
+from lib389.topologies import topology_m2
+from lib389.idm.user import UserAccounts
+from lib389.replica import ReplicationManager
+from lib389._mapped_object import DSLdapObject
+from lib389.schema import Schema, OBJECT_MODEL_PARAMS
+from ldap.schema.models import ObjectClass
+
+pytestmark = pytest.mark.tier2
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+def wait_for_attr_value(instance, dn, attr, value=None, timeout=30):
+    """Wait for an attribute to have a specific value or be absent."""
+
+    for _ in range(timeout):
+        try:
+            entry_obj = DSLdapObject(instance, dn)
+            if value is None:
+                if not entry_obj.present(attr):
+                    return True
+            else:
+                if entry_obj.present(attr, value):
+                    return True
+        except (ldap.NO_SUCH_OBJECT, ldap.SERVER_DOWN, ldap.CONNECT_ERROR):
+            pass  # Expected during server restart
+        except Exception as e:
+            log.debug(f"Error checking {dn}: {e}")
+
+        time.sleep(1)
+
+    if value is None:
+        raise AssertionError(f"Timeout: attribute '{attr}' still present in 
{dn} after {timeout}s")
+    else:
+        raise AssertionError(f"Timeout: attribute '{attr}' does not have value 
'{value}' in {dn} after {timeout}s")
+
+def _add_oc(instance, oid, name):
+    """Add an objectClass"""
+    schema = Schema(instance)
+    params = OBJECT_MODEL_PARAMS[ObjectClass].copy()
+    params.update({
+        'names': (name,),
+        'oid': oid,
+        'desc': 'To test schema update policies',
+        'must': ('postalAddress', 'postalCode'),
+        'may': ('member', 'street'),
+        'sup': ('person',),
+    })
+    schema.add_objectclass(params)
+
+
[email protected]
+def temporary_oc2(topology_m2):
+    """Fixture to create and automatically clean up OC2UpdatePolicy."""
+    supplier1 = topology_m2.ms["supplier1"]
+    supplier2 = topology_m2.ms["supplier2"]
+    name = "OC2UpdatePolicy"
+
+    log.info(f"Adding temporary objectclass: {name}")
+    _add_oc(supplier1, "1.2.3.4.5.6.7.8.9.10.3", name)
+
+    yield name
+
+    log.info(f"Cleaning up temporary objectclass: {name}")
+    for s in [supplier1, supplier2]:
+        try:
+            schema = Schema(s)
+            schema.remove_objectclass(name)
+        except (ldap.NO_SUCH_OBJECT, ValueError):
+            pass
+
+
[email protected](scope="function")
+def setup_test_env(request, topology_m2):
+    """Initialize the test environment"""
+    supplier1 = topology_m2.ms["supplier1"]
+    supplier2 = topology_m2.ms["supplier2"]
+
+    log.info("Add OCUpdatePolicy that allows 'member' attribute")
+    _add_oc(supplier1, "1.2.3.4.5.6.7.8.9.10.2", "OCUpdatePolicy")
+
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+    repl.wait_for_replication(supplier1, supplier2)
+    repl.wait_for_replication(supplier2, supplier1)
+
+    users = UserAccounts(supplier1, DEFAULT_SUFFIX)
+    users.create_test_user(uid=1000)
+
+    config = supplier1.config
+    config.replace_many(
+        ('nsslapd-errorlog-level', str(128 + 8192))
+    )
+
+    config2 = supplier2.config
+    config2.replace_many(
+        ('nsslapd-errorlog-level', str(128 + 8192))
+    )
+
+    for i in range(10):
+        users.create_test_user(uid=2000 + i)
+
+    log.info("Create main test entry")
+    members = [f"uid=test_user_{2000 + i},ou=People,{DEFAULT_SUFFIX}" for i in 
range(10)]
+    members.append(f"uid=test_user_1000,ou=People,{DEFAULT_SUFFIX}")
+    users.create(properties={
+        'uid': 'test_entry',
+        'cn': 'test_entry',
+        'sn': 'test_entry',
+        'objectClass': ['top', 'person', 'organizationalPerson',
+                        'inetOrgPerson', 'posixAccount', 'account', 
'OCUpdatePolicy'],
+        'postalAddress': 'here',
+        'postalCode': '1234',
+        'member': members,
+        'uidNumber': '3000',
+        'gidNumber': '3000',
+        'homeDirectory': '/home/test_entry'
+    })
+
+    def fin():
+        users = UserAccounts(supplier1, DEFAULT_SUFFIX)
+        for user in users.list():
+            user.delete()
+
+        config.replace('nsslapd-errorlog-level', '0')
+        config2.replace('nsslapd-errorlog-level', '0')
+
+        schema = Schema(supplier1)
+        try:
+            schema.remove_objectclass("OCUpdatePolicy")
+        except (ldap.NO_SUCH_OBJECT, ValueError):
+            pass
+
+    request.addfinalizer(fin)
+
+
+def test_schema_update_policy_allow(topology_m2, setup_test_env):
+    """Test that schema updates are allowed and replicated when no reject 
policy is set
+
+    :id: c8e3d2e4-5b7a-4a9d-9f2e-8a5b3c4d6e7f
+    :setup: Two supplier replication setup with test entries
+    :steps:
+        1. Add an entry with custom objectclass on supplier1
+        2. Check entry is replicated to supplier2
+        3. Update entry on supplier2
+        4. Check update is replicated back to supplier1
+    :expectedresults:
+        1. Entry should be added successfully
+        2. Entry should be replicated with schema
+        3. Update should succeed
+        4. Update should be replicated
+    """
+    supplier1 = topology_m2.ms["supplier1"]
+    supplier2 = topology_m2.ms["supplier2"]
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    log.info("Check entry was replicated to supplier2")
+    repl.wait_for_replication(supplier1, supplier2)
+    users = UserAccounts(supplier1, DEFAULT_SUFFIX)
+    users2 = UserAccounts(supplier2, DEFAULT_SUFFIX)
+    entry = users2.get('test_entry')
+    assert entry
+
+    log.info("Update entry on supplier2")
+    entry.replace('description', 'test_add')
+    repl.wait_for_replication(supplier2, supplier1)
+    entry = users.get('test_entry')
+    assert entry.get_attr_val_utf8('description') == 'test_add'
+
+
+def test_schema_update_policy_reject(topology_m2, setup_test_env, 
temporary_oc2):
+    """Test that schema updates can be rejected based on policy
+
+    :id: d9f4e3b5-6c8b-5b0e-0f3a-9b6c5d8e9g0
+    :setup: Two supplier replication setup with test entries
+    :steps:
+        1. Configure supplier1 to reject schema updates containing OC_NAME
+        2. Add a new objectclass on supplier1
+        3. Update an entry to trigger schema push
+        4. Verify schema was not pushed to supplier2
+        5. Remove reject policy
+        6. Update entry again
+        7. Verify schema is now pushed to supplier2
+    :expectedresults:
+        1. Policy should be configured
+        2. New objectclass should be added
+        3. Update should trigger replication
+        4. Schema should not be pushed due to policy
+        5. Policy should be removed
+        6. Update should trigger replication
+        7. Schema should now be pushed
+    """
+    supplier1 = topology_m2.ms["supplier1"]
+    supplier2 = topology_m2.ms["supplier2"]
+    repl = ReplicationManager(DEFAULT_SUFFIX)
+
+    log.info("Configure supplier to reject schema updates for OCUpdatePolicy")
+    policy_dn = f"cn=supplierUpdatePolicy,cn=replSchema,{supplier1.config.dn}"
+    policy_entry = DSLdapObject(supplier1, policy_dn)
+    policy_entry.add('schemaUpdateObjectclassReject', 'OCUpdatePolicy')
+    supplier1.restart()
+    wait_for_attr_value(supplier1, policy_dn, 'schemaUpdateObjectclassReject', 
'OCUpdatePolicy')
+
+    log.info("Verify OC2UpdatePolicy is in supplier1")
+    schema = Schema(supplier1)
+    schema_attrs = schema.get_objectclasses()
+    assert any('oc2updatepolicy' in (name.lower() for name in oc.names) for oc 
in schema_attrs)
+
+    log.info("Update entry on supplier1 to trigger schema push")
+    users = UserAccounts(supplier1, DEFAULT_SUFFIX)
+    test_user = users.get('test_entry')
+    test_user.replace('description', 'test_reject')
+
+    log.info("Check update was replicated")
+    repl.wait_for_replication(supplier1, supplier2)
+    users2 = UserAccounts(supplier2, DEFAULT_SUFFIX)
+    entry = users2.get('test_entry')
+    assert entry.get_attr_val_utf8('description') == 'test_reject'
+
+    log.info("Verify OC2UpdatePolicy was NOT pushed to supplier2")
+    schema_attrs = supplier2.schema.get_objectclasses()
+    assert not any('oc2updatepolicy' in (name.lower() for name in oc.names) 
for oc in schema_attrs)
+
+    log.info("Remove reject policy")
+    policy_entry.remove('schemaUpdateObjectclassReject', 'OCUpdatePolicy')
+    supplier1.restart()
+    wait_for_attr_value(supplier1, policy_dn, 'schemaUpdateObjectclassReject', 
None)
+
+    log.info("Update entry again to trigger schema push")
+    test_user.replace('description', 'test_no_more_reject')
+
+    log.info("Check update was replicated")
+    repl.wait_for_replication(supplier1, supplier2)
+    entry = users2.get('test_entry')
+    assert entry.get_attr_val_utf8('description') == 'test_no_more_reject'
+
+    log.info("Verify OC2UpdatePolicy is now in supplier2")
+    schema_attrs = supplier2.schema.get_objectclasses()
+    assert any('oc2updatepolicy' in (name.lower() for name in oc.names) for oc 
in schema_attrs)
+
+
+if __name__ == '__main__':
+    # Run isolated
+    # -s for DEBUG mode
+    CURRENT_FILE = os.path.realpath(__file__)
+    pytest.main(["-s", CURRENT_FILE])
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/tickets/ticket47676_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/tickets/ticket47676_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/tickets/ticket47676_test.py
        2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/tickets/ticket47676_test.py
        1970-01-01 01:00:00.000000000 +0100
@@ -1,252 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-'''
-Created on Nov 7, 2013
-
-@author: tbordaz
-'''
-import logging
-import time
-
-import ldap
-import pytest
-from lib389 import Entry
-from lib389._constants import *
-from lib389.topologies import topology_m2
-from lib389.replica import ReplicationManager
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-from lib389.utils import *
-
-# Skip on older versions
-pytestmark = [pytest.mark.tier2,
-              pytest.mark.skipif(ds_is_older('1.3.2'), reason="Not 
implemented")]
-log = logging.getLogger(__name__)
-
-SCHEMA_DN = "cn=schema"
-TEST_REPL_DN = "cn=test_repl, %s" % SUFFIX
-OC_NAME = 'OCticket47676'
-OC_OID_EXT = 2
-MUST = "(postalAddress $ postalCode)"
-MAY = "(member $ street)"
-
-OC2_NAME = 'OC2ticket47676'
-OC2_OID_EXT = 3
-MUST_2 = "(postalAddress $ postalCode)"
-MAY_2 = "(member $ street)"
-
-REPL_SCHEMA_POLICY_CONSUMER = "cn=consumerUpdatePolicy,cn=replSchema,cn=config"
-REPL_SCHEMA_POLICY_SUPPLIER = "cn=supplierUpdatePolicy,cn=replSchema,cn=config"
-
-OTHER_NAME = 'other_entry'
-MAX_OTHERS = 10
-
-BIND_NAME = 'bind_entry'
-BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
-BIND_PW = 'password'
-
-ENTRY_NAME = 'test_entry'
-ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX)
-ENTRY_OC = "top person %s" % OC_NAME
-
-BASE_OID = "1.2.3.4.5.6.7.8.9.10"
-
-
-def _oc_definition(oid_ext, name, must=None, may=None):
-    oid = "%s.%d" % (BASE_OID, oid_ext)
-    desc = 'To test ticket 47490'
-    sup = 'person'
-    if not must:
-        must = MUST
-    if not may:
-        may = MAY
-
-    new_oc = "( %s  NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % 
(oid, name, desc, sup, must, may)
-    return ensure_bytes(new_oc)
-
-def replication_check(topology_m2):
-    repl = ReplicationManager(SUFFIX)
-    supplier1 = topology_m2.ms["supplier1"]
-    supplier2 = topology_m2.ms["supplier2"]
-    return repl.test_replication(supplier1, supplier2)
-
-def test_ticket47676_init(topology_m2):
-    """
-        It adds
-           - Objectclass with MAY 'member'
-           - an entry ('bind_entry') with which we bind to test the 'SELFDN' 
operation
-        It deletes the anonymous aci
-
-    """
-
-    topology_m2.ms["supplier1"].log.info("Add %s that allows 'member' 
attribute" % OC_NAME)
-    new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY)
-    topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc)
-
-    # entry used to bind with
-    topology_m2.ms["supplier1"].log.info("Add %s" % BIND_DN)
-    topology_m2.ms["supplier1"].add_s(Entry((BIND_DN, {
-        'objectclass': "top person".split(),
-        'sn': BIND_NAME,
-        'cn': BIND_NAME,
-        'userpassword': BIND_PW})))
-
-    # enable acl error logging
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', ensure_bytes(str(128 + 
8192)))]  # ACL + REPL
-    topology_m2.ms["supplier1"].modify_s(DN_CONFIG, mod)
-    topology_m2.ms["supplier2"].modify_s(DN_CONFIG, mod)
-
-    # add dummy entries
-    for cpt in range(MAX_OTHERS):
-        name = "%s%d" % (OTHER_NAME, cpt)
-        topology_m2.ms["supplier1"].add_s(Entry(("cn=%s,%s" % (name, SUFFIX), {
-            'objectclass': "top person".split(),
-            'sn': name,
-            'cn': name})))
-
-
-def test_ticket47676_skip_oc_at(topology_m2):
-    '''
-        This test ADD an entry on SUPPLIER1 where 47676 is fixed. Then it 
checks that entry is replicated
-        on SUPPLIER2 (even if on SUPPLIER2 47676 is NOT fixed). Then update on 
SUPPLIER2.
-        If the schema has successfully been pushed, updating Supplier2 should 
succeed
-    '''
-    topology_m2.ms["supplier1"].log.info("\n\n######################### ADD 
######################\n")
-
-    # bind as 'cn=Directory manager'
-    topology_m2.ms["supplier1"].log.info("Bind as %s and add the add the entry 
with specific oc" % DN_DM)
-    topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD)
-
-    # Prepare the entry with multivalued members
-    entry = Entry(ENTRY_DN)
-    entry.setValues('objectclass', 'top', 'person', 'OCticket47676')
-    entry.setValues('sn', ENTRY_NAME)
-    entry.setValues('cn', ENTRY_NAME)
-    entry.setValues('postalAddress', 'here')
-    entry.setValues('postalCode', '1234')
-    members = []
-    for cpt in range(MAX_OTHERS):
-        name = "%s%d" % (OTHER_NAME, cpt)
-        members.append("cn=%s,%s" % (name, SUFFIX))
-    members.append(BIND_DN)
-    entry.setValues('member', members)
-
-    topology_m2.ms["supplier1"].log.info("Try to add Add  %s should be 
successful" % ENTRY_DN)
-    topology_m2.ms["supplier1"].add_s(entry)
-
-    #
-    # Now check the entry as been replicated
-    #
-    topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD)
-    topology_m2.ms["supplier1"].log.info("Try to retrieve %s from Supplier2" % 
ENTRY_DN)
-    replication_check(topology_m2)
-    ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, 
"(objectclass=*)")
-    assert ent
-    # Now update the entry on Supplier2 (as DM because 47676 is possibly not 
fixed on M2)
-    topology_m2.ms["supplier1"].log.info("Update  %s on M2" % ENTRY_DN)
-    mod = [(ldap.MOD_REPLACE, 'description', b'test_add')]
-    topology_m2.ms["supplier2"].modify_s(ENTRY_DN, mod)
-
-    topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD)
-    replication_check(topology_m2)
-    ent = topology_m2.ms["supplier1"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, 
"(objectclass=*)")
-    assert ensure_str(ent.getValue('description')) == 'test_add'
-
-
-def test_ticket47676_reject_action(topology_m2):
-    topology_m2.ms["supplier1"].log.info("\n\n######################### REJECT 
ACTION ######################\n")
-
-    topology_m2.ms["supplier1"].simple_bind_s(DN_DM, PASSWORD)
-    topology_m2.ms["supplier2"].simple_bind_s(DN_DM, PASSWORD)
-
-    # make supplier1 to refuse to push the schema if OC_NAME is present in 
consumer schema
-    mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', ensure_bytes('%s' % 
(OC_NAME)))]  # ACL + REPL
-    topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
-
-    # Restart is required to take into account that policy
-    topology_m2.ms["supplier1"].stop(timeout=10)
-    topology_m2.ms["supplier1"].start(timeout=10)
-
-    # Add a new OC on M1 so that schema CSN will change and M1 will try to 
push the schema
-    topology_m2.ms["supplier1"].log.info("Add %s on M1" % OC2_NAME)
-    new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY)
-    topology_m2.ms["supplier1"].schema.add_schema('objectClasses', new_oc)
-
-    # Safety checking that the schema has been updated on M1
-    topology_m2.ms["supplier1"].log.info("Check %s is in M1" % OC2_NAME)
-    ent = topology_m2.ms["supplier1"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, 
"(objectclass=*)", ["objectclasses"])
-    assert ent.hasAttr('objectclasses')
-    found = False
-    for objectclass in ent.getValues('objectclasses'):
-        if str(objectclass).find(OC2_NAME) >= 0:
-            found = True
-            break
-    assert found
-
-    # Do an update of M1 so that M1 will try to push the schema
-    topology_m2.ms["supplier1"].log.info("Update  %s on M1" % ENTRY_DN)
-    mod = [(ldap.MOD_REPLACE, 'description', b'test_reject')]
-    topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod)
-
-    # Check the replication occured and so also M1 attempted to push the schema
-    topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN)
-
-    replication_check(topology_m2)
-    ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, 
"(objectclass=*)", ['description'])
-    assert ensure_str(ent.getValue('description')) == 'test_reject'
-
-    # Check that the schema has not been pushed
-    topology_m2.ms["supplier1"].log.info("Check %s is not in M2" % OC2_NAME)
-    ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, 
"(objectclass=*)", ["objectclasses"])
-    assert ent.hasAttr('objectclasses')
-    found = False
-    for objectclass in ent.getValues('objectclasses'):
-        if str(objectclass).find(OC2_NAME) >= 0:
-            found = True
-            break
-    assert not found
-
-    topology_m2.ms["supplier1"].log.info("\n\n######################### NO 
MORE REJECT ACTION ######################\n")
-
-    # make supplier1 to do no specific action on OC_NAME
-    mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', 
ensure_bytes('%s' % (OC_NAME)))]  # ACL + REPL
-    topology_m2.ms["supplier1"].modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod)
-
-    # Restart is required to take into account that policy
-    topology_m2.ms["supplier1"].stop(timeout=10)
-    topology_m2.ms["supplier1"].start(timeout=10)
-
-    # Do an update of M1 so that M1 will try to push the schema
-    topology_m2.ms["supplier1"].log.info("Update  %s on M1" % ENTRY_DN)
-    mod = [(ldap.MOD_REPLACE, 'description', b'test_no_more_reject')]
-    topology_m2.ms["supplier1"].modify_s(ENTRY_DN, mod)
-
-    # Check the replication occured and so also M1 attempted to push the schema
-    topology_m2.ms["supplier1"].log.info("Check updated %s on M2" % ENTRY_DN)
-
-    replication_check(topology_m2)
-    ent = topology_m2.ms["supplier2"].getEntry(ENTRY_DN, ldap.SCOPE_BASE, 
"(objectclass=*)", ['description'])
-    assert ensure_str(ent.getValue('description')) == 'test_no_more_reject'
-    # Check that the schema has been pushed
-    topology_m2.ms["supplier1"].log.info("Check %s is in M2" % OC2_NAME)
-    ent = topology_m2.ms["supplier2"].getEntry(SCHEMA_DN, ldap.SCOPE_BASE, 
"(objectclass=*)", ["objectclasses"])
-    assert ent.hasAttr('objectclasses')
-    found = False
-    for objectclass in ent.getValues('objectclasses'):
-        if str(objectclass).find(OC2_NAME) >= 0:
-            found = True
-            break
-    assert found
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/tickets/ticket47714_test.py
 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/tickets/ticket47714_test.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/dirsrvtests/tests/tickets/ticket47714_test.py
        2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/dirsrvtests/tests/tickets/ticket47714_test.py
        1970-01-01 01:00:00.000000000 +0100
@@ -1,213 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import logging
-import time
-
-import ldap
-import pytest
-from lib389 import Entry
-from lib389._constants import *
-from lib389.topologies import topology_st
-
-log = logging.getLogger(__name__)
-
-from lib389.utils import *
-
-# Skip on older versions
-pytestmark = [pytest.mark.tier2,
-              pytest.mark.skipif(ds_is_older('1.3.3'), reason="Not 
implemented")]
-ACCT_POLICY_CONFIG_DN = ('cn=config,cn=%s,cn=plugins,cn=config' %
-                         PLUGIN_ACCT_POLICY)
-ACCT_POLICY_DN = 'cn=Account Inactivation Policy,%s' % SUFFIX
-# Set inactivty high to prevent timing issues with debug options or gdb on 
test runs.
-INACTIVITY_LIMIT = '3000'
-SEARCHFILTER = '(objectclass=*)'
-
-TEST_USER = 'ticket47714user'
-TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX)
-TEST_USER_PW = '%s' % TEST_USER
-
-
-def _header(topology_st, label):
-    
topology_st.standalone.log.info("\n\n###############################################")
-    topology_st.standalone.log.info("#######")
-    topology_st.standalone.log.info("####### %s" % label)
-    topology_st.standalone.log.info("#######")
-    
topology_st.standalone.log.info("###############################################")
-
-
-def test_ticket47714_init(topology_st):
-    """
-    1. Add account policy entry to the DB
-    2. Add a test user to the DB
-    """
-    _header(topology_st,
-            'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account 
Policy plugin if account lockout is based on passwordExpirationTime.')
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-
-    log.info("\n######################### Adding Account Policy entry: %s 
######################\n" % ACCT_POLICY_DN)
-    topology_st.standalone.add_s(
-        Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry 
extensibleObject accountpolicy".split(),
-                                'accountInactivityLimit': INACTIVITY_LIMIT})))
-
-    log.info("\n######################### Adding Test User entry: %s 
######################\n" % TEST_USER_DN)
-    topology_st.standalone.add_s(
-        Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson 
inetOrgPerson".split(),
-                              'cn': TEST_USER,
-                              'sn': TEST_USER,
-                              'givenname': TEST_USER,
-                              'userPassword': TEST_USER_PW,
-                              'acctPolicySubentry': ACCT_POLICY_DN})))
-
-
-def test_ticket47714_run_0(topology_st):
-    """
-    Check this change has no inpact to the existing functionality.
-    1. Set account policy config without the new attr alwaysRecordLoginAttr
-    2. Bind as a test user
-    3. Bind as the test user again and check the lastLoginTime is updated
-    4. Waint longer than the accountInactivityLimit time and bind as the test 
user,
-       which should fail with CONSTANT_VIOLATION.
-    """
-    _header(topology_st, 'Account Policy - No new attr alwaysRecordLoginAttr 
in config')
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-
-    # Modify Account Policy config entry
-    topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 
'alwaysrecordlogin', b'yes'),
-                                                            (ldap.MOD_REPLACE, 
'stateattrname', b'lastLoginTime'),
-                                                            (ldap.MOD_REPLACE, 
'altstateattrname', b'createTimestamp'),
-                                                            (ldap.MOD_REPLACE, 
'specattrname', b'acctPolicySubentry'),
-                                                            (ldap.MOD_REPLACE, 
'limitattrname',
-                                                             
b'accountInactivityLimit')])
-
-    # Enable the plugins
-    topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
-    topology_st.standalone.restart()
-
-    log.info("\n######################### Bind as %s ######################\n" 
% TEST_USER_DN)
-    try:
-        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
-    except ldap.CONSTRAINT_VIOLATION as e:
-        log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc']))
-
-    time.sleep(2)
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, 
SEARCHFILTER, ['lastLoginTime'])
-
-    lastLoginTime0 = entry[0].lastLoginTime
-
-    log.info("\n######################### Bind as %s again 
######################\n" % TEST_USER_DN)
-    try:
-        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
-    except ldap.CONSTRAINT_VIOLATION as e:
-        log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc']))
-
-    time.sleep(2)
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, 
SEARCHFILTER, ['lastLoginTime'])
-
-    lastLoginTime1 = entry[0].lastLoginTime
-
-    log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % 
(lastLoginTime0, lastLoginTime1))
-    assert lastLoginTime0 < lastLoginTime1
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-
-    # Now, change the inactivity limit, because that should trigger the 
account to now be locked. This is possible because the check is "delayed" until 
the usage of the account.
-
-    topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 
'accountInactivityLimit', b'1'),])
-    time.sleep(2)
-
-    entry = topology_st.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, 
SEARCHFILTER)
-    log.info("\n######################### %s ######################\n" % 
ACCT_POLICY_CONFIG_DN)
-    log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit)
-    log.info("\n######################### %s DONE ######################\n" % 
ACCT_POLICY_CONFIG_DN)
-
-    log.info("\n######################### Bind as %s again to fail 
######################\n" % TEST_USER_DN)
-    try:
-        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
-    except ldap.CONSTRAINT_VIOLATION as e:
-        log.info('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc']))
-        log.info("%s was successfully inactivated." % TEST_USER_DN)
-        pass
-
-    # Now reset the value high to prevent issues with the next test.
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology_st.standalone.modify_s(ACCT_POLICY_DN, [(ldap.MOD_REPLACE, 
'accountInactivityLimit', ensure_bytes(INACTIVITY_LIMIT)),])
-
-
-def test_ticket47714_run_1(topology_st):
-    """
-    Verify a new config attr alwaysRecordLoginAttr
-    1. Set account policy config with the new attr alwaysRecordLoginAttr: 
lastLoginTime
-       Note: bogus attr is set to stateattrname.
-             altstateattrname type value is used for checking whether the 
account is idle or not.
-    2. Bind as a test user
-    3. Bind as the test user again and check the alwaysRecordLoginAttr: 
lastLoginTime is updated
-    """
-    _header(topology_st, 'Account Policy - With new attr alwaysRecordLoginAttr 
in config')
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology_st.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 
'lastLoginTime', None)])
-
-    # Modify Account Policy config entry
-    topology_st.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 
'alwaysrecordlogin', b'yes'),
-                                                            (ldap.MOD_REPLACE, 
'stateattrname', b'bogus'),
-                                                            (ldap.MOD_REPLACE, 
'altstateattrname', b'modifyTimestamp'),
-                                                            (
-                                                            ldap.MOD_REPLACE, 
'alwaysRecordLoginAttr', b'lastLoginTime'),
-                                                            (ldap.MOD_REPLACE, 
'specattrname', b'acctPolicySubentry'),
-                                                            (ldap.MOD_REPLACE, 
'limitattrname',
-                                                             
b'accountInactivityLimit')])
-
-    # Enable the plugins
-    topology_st.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
-
-    topology_st.standalone.restart()
-
-    log.info("\n######################### Bind as %s ######################\n" 
% TEST_USER_DN)
-    try:
-        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
-    except ldap.CONSTRAINT_VIOLATION as e:
-        log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc']))
-
-    time.sleep(1)
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, 
SEARCHFILTER, ['lastLoginTime'])
-    lastLoginTime0 = entry[0].lastLoginTime
-
-    log.info("\n######################### Bind as %s again 
######################\n" % TEST_USER_DN)
-    try:
-        topology_st.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
-    except ldap.CONSTRAINT_VIOLATION as e:
-        log.error('CONSTRAINT VIOLATION {}'.format(e.args[0]['desc']))
-
-    time.sleep(1)
-
-    topology_st.standalone.simple_bind_s(DN_DM, PASSWORD)
-    entry = topology_st.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, 
SEARCHFILTER, ['lastLoginTime'])
-    lastLoginTime1 = entry[0].lastLoginTime
-
-    log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % 
(lastLoginTime0, lastLoginTime1))
-    assert lastLoginTime0 < lastLoginTime1
-
-    topology_st.standalone.log.info("ticket47714 was successfully verified.")
-
-
-if __name__ == '__main__':
-    # Run isolated
-    # -s for DEBUG mode
-    CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s %s" % CURRENT_FILE)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/back-ldbm.h 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/back-ldbm.h
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/back-ldbm.h 
    2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/back-ldbm.h 
    2025-10-07 21:13:14.000000000 +0200
@@ -548,6 +548,7 @@
     int li_mode;
     int li_lookthroughlimit;
     int li_allidsthreshold;
+    int li_system_allidsthreshold;
     char *li_directory;
     int li_reslimit_lookthrough_handle;
     uint64_t li_dbcachesize;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/index.c 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/index.c
--- old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/index.c 
2025-10-02 22:05:50.000000000 +0200
+++ new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/index.c 
2025-10-07 21:13:14.000000000 +0200
@@ -997,6 +997,8 @@
     }
     if (pb) {
         slapi_pblock_get(pb, SLAPI_SEARCH_IS_AND, &is_and);
+    } else if (strcasecmp(type, LDBM_ANCESTORID_STR) == 0) {
+        is_and = 1;
     }
     ai_flags = is_and ? INDEX_ALLIDS_FLAG_AND : 0;
     /* the caller can pass in a value of 0 - just ignore those - but if the 
index
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/instance.c 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/instance.c
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/instance.c  
    2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/instance.c  
    2025-10-07 21:13:14.000000000 +0200
@@ -16,7 +16,7 @@
 
 /* Forward declarations */
 static void ldbm_instance_destructor(void **arg);
-Slapi_Entry *ldbm_instance_init_config_entry(char *cn_val, char *v1, char *v2, 
char *v3, char *v4, char *mr);
+Slapi_Entry *ldbm_instance_init_config_entry(char *cn_val, char *v1, char *v2, 
char *v3, char *v4, char *mr, char *scanlimit);
 
 
 /* Creates and initializes a new ldbm_instance structure.
@@ -126,7 +126,7 @@
  * Take a bunch of strings, and create a index config entry
  */
 Slapi_Entry *
-ldbm_instance_init_config_entry(char *cn_val, char *val1, char *val2, char 
*val3, char *val4, char *mr)
+ldbm_instance_init_config_entry(char *cn_val, char *val1, char *val2, char 
*val3, char *val4, char *mr, char *scanlimit)
 {
     Slapi_Entry *e = slapi_entry_alloc();
     struct berval *vals[2];
@@ -167,6 +167,11 @@
         slapi_entry_add_values(e, "nsMatchingRule", vals);
     }
 
+    if (scanlimit) {
+        val.bv_val = scanlimit;
+        val.bv_len = strlen(scanlimit);
+        slapi_entry_add_values(e, "nsIndexIDListScanLimit", vals);
+    }
     return e;
 }
 
@@ -179,8 +184,59 @@
 {
     Slapi_Entry *e;
     ldbm_instance *inst = (ldbm_instance *)be->be_instance_info;
+    struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
     /* write the dse file only on the final index */
     int flags = LDBM_INSTANCE_CONFIG_DONT_WRITE;
+    char *ancestorid_indexes_limit = NULL;
+    char *parentid_indexes_limit = NULL;
+    struct attrinfo *ai = NULL;
+    struct index_idlistsizeinfo *iter;
+    int cookie;
+    int limit;
+
+    ainfo_get(be, (char *)LDBM_ANCESTORID_STR, &ai);
+    if (ai && ai->ai_idlistinfo) {
+        iter = (struct index_idlistsizeinfo *)dl_get_first(ai->ai_idlistinfo, 
&cookie);
+        if (iter) {
+            limit = iter->ai_idlistsizelimit;
+            slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set ancestorid limit to %d from attribute index\n",
+                      limit);
+        } else {
+            limit = li->li_system_allidsthreshold;
+            slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set ancestorid limit to %d from default (fail to read 
limit)\n",
+                      limit);
+        }
+        ancestorid_indexes_limit = slapi_ch_smprintf("limit=%d type=eq 
flags=AND", limit);
+    } else {
+        ancestorid_indexes_limit = slapi_ch_smprintf("limit=%d type=eq 
flags=AND", li->li_system_allidsthreshold);
+        slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set ancestorid limit to %d from default (no attribute 
or limit)\n",
+                      li->li_system_allidsthreshold);
+    }
+
+    ainfo_get(be, (char *)LDBM_PARENTID_STR, &ai);
+    if (ai && ai->ai_idlistinfo) {
+        iter = (struct index_idlistsizeinfo *)dl_get_first(ai->ai_idlistinfo, 
&cookie);
+        if (iter) {
+            limit = iter->ai_idlistsizelimit;
+            slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set parentid limit to %d from attribute index\n",
+                      limit);
+        } else {
+            limit = li->li_system_allidsthreshold;
+            slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set parentid limit to %d from default (fail to read 
limit)\n",
+                      limit);
+        }
+        parentid_indexes_limit = slapi_ch_smprintf("limit=%d type=eq 
flags=AND", limit);
+    } else {
+        parentid_indexes_limit = slapi_ch_smprintf("limit=%d type=eq 
flags=AND", li->li_system_allidsthreshold);
+        slapi_log_err(SLAPI_LOG_BACKLDBM, 
"ldbm_instance_create_default_indexes",
+                      "set parentid limit to %d from default (no attribute or 
limit)\n",
+                      li->li_system_allidsthreshold);
+    }
 
     /*
      * Always index (entrydn or entryrdn), parentid, objectclass,
@@ -188,42 +244,43 @@
      * since they are used by some searches, replication and the
      * ACL routines.
      */
-    e = ldbm_instance_init_config_entry(LDBM_ENTRYRDN_STR, "subtree", 0, 0, 0, 
0);
+    e = ldbm_instance_init_config_entry(LDBM_ENTRYRDN_STR, "subtree", 0, 0, 0, 
0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
-    e = ldbm_instance_init_config_entry(LDBM_PARENTID_STR, "eq", 0, 0, 0, 
"integerOrderingMatch");
+    e = ldbm_instance_init_config_entry(LDBM_PARENTID_STR, "eq", 0, 0, 0, 
"integerOrderingMatch", parentid_indexes_limit);
     ldbm_instance_config_add_index_entry(inst, e, flags);
+    attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
     slapi_entry_free(e);
 
-    e = ldbm_instance_init_config_entry("objectclass", "eq", 0, 0, 0, 0);
+    e = ldbm_instance_init_config_entry("objectclass", "eq", 0, 0, 0, 0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
-    e = ldbm_instance_init_config_entry("aci", "pres", 0, 0, 0, 0);
+    e = ldbm_instance_init_config_entry("aci", "pres", 0, 0, 0, 0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
-    e = ldbm_instance_init_config_entry(LDBM_NUMSUBORDINATES_STR, "pres", 0, 
0, 0, 0);
+    e = ldbm_instance_init_config_entry(LDBM_NUMSUBORDINATES_STR, "pres", 0, 
0, 0, 0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
-    e = ldbm_instance_init_config_entry(SLAPI_ATTR_UNIQUEID, "eq", 0, 0, 0, 0);
+    e = ldbm_instance_init_config_entry(SLAPI_ATTR_UNIQUEID, "eq", 0, 0, 0, 0, 
0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
     /* For MMR, we need this attribute (to replace use of dncomp in delete). */
-    e = ldbm_instance_init_config_entry(ATTR_NSDS5_REPLCONFLICT, "eq", "pres", 
0, 0, 0);
+    e = ldbm_instance_init_config_entry(ATTR_NSDS5_REPLCONFLICT, "eq", "pres", 
0, 0, 0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
     /* write the dse file only on the final index */
-    e = ldbm_instance_init_config_entry(SLAPI_ATTR_NSCP_ENTRYDN, "eq", 0, 0, 
0, 0);
+    e = ldbm_instance_init_config_entry(SLAPI_ATTR_NSCP_ENTRYDN, "eq", 0, 0, 
0, 0, 0);
     ldbm_instance_config_add_index_entry(inst, e, flags);
     slapi_entry_free(e);
 
     /* ldbm_instance_config_add_index_entry(inst, 2, argv); */
-    e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 
0, 0, 0);
+    e = ldbm_instance_init_config_entry(LDBM_PSEUDO_ATTR_DEFAULT, "none", 0, 
0, 0, 0, 0);
     attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
     slapi_entry_free(e);
 
@@ -231,10 +288,14 @@
      * ancestorid is special, there is actually no such attr type
      * but we still want to use the attr index file APIs.
      */
-    e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0, 
"integerOrderingMatch");
+    e = ldbm_instance_init_config_entry(LDBM_ANCESTORID_STR, "eq", 0, 0, 0, 
"integerOrderingMatch", ancestorid_indexes_limit);
+    ldbm_instance_config_add_index_entry(inst, e, flags);
     attr_index_config(be, "ldbm index init", 0, e, 1, 0, NULL);
     slapi_entry_free(e);
 
+    slapi_ch_free_string(&ancestorid_indexes_limit);
+    slapi_ch_free_string(&parentid_indexes_limit);
+
     return 0;
 }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_config.c
 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_config.c
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_config.c
   2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_config.c
   2025-10-07 21:13:14.000000000 +0200
@@ -386,6 +386,35 @@
 }
 
 static void *
+ldbm_config_system_allidsthreshold_get(void *arg)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+
+    return (void *)((uintptr_t)(li->li_system_allidsthreshold));
+}
+
+static int
+ldbm_config_system_allidsthreshold_set(void *arg, void *value, char *errorbuf 
__attribute__((unused)), int phase __attribute__((unused)), int apply)
+{
+    struct ldbminfo *li = (struct ldbminfo *)arg;
+    int retval = LDAP_SUCCESS;
+    int val = (int)((uintptr_t)value);
+
+    /* Do whatever we can to make sure the data is ok. */
+
+    /* Catch attempts to configure a stupidly low ancestorid allidsthreshold */
+    if ((val > -1) && (val < 5000)) {
+        val = 5000;
+    }
+
+    if (apply) {
+        li->li_system_allidsthreshold = val;
+    }
+
+    return retval;
+}
+
+static void *
 ldbm_config_pagedallidsthreshold_get(void *arg)
 {
     struct ldbminfo *li = (struct ldbminfo *)arg;
@@ -926,6 +955,7 @@
     {CONFIG_LOOKTHROUGHLIMIT, CONFIG_TYPE_INT, "5000", 
&ldbm_config_lookthroughlimit_get, &ldbm_config_lookthroughlimit_set, 
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_MODE, CONFIG_TYPE_INT_OCTAL, "0600", &ldbm_config_mode_get, 
&ldbm_config_mode_set, CONFIG_FLAG_ALWAYS_SHOW | 
CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_IDLISTSCANLIMIT, CONFIG_TYPE_INT, "2147483646", 
&ldbm_config_allidsthreshold_get, &ldbm_config_allidsthreshold_set, 
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+    {CONFIG_SYSTEMIDLISTSCANLIMIT, CONFIG_TYPE_INT, "5000", 
&ldbm_config_system_allidsthreshold_get, 
&ldbm_config_system_allidsthreshold_set, CONFIG_FLAG_ALWAYS_SHOW | 
CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
     {CONFIG_DIRECTORY, CONFIG_TYPE_STRING, "", &ldbm_config_directory_get, 
&ldbm_config_directory_set, CONFIG_FLAG_ALWAYS_SHOW | 
CONFIG_FLAG_ALLOW_RUNNING_CHANGE | CONFIG_FLAG_SKIP_DEFAULT_SETTING},
     {CONFIG_MAXPASSBEFOREMERGE, CONFIG_TYPE_INT, "100", 
&ldbm_config_maxpassbeforemerge_get, &ldbm_config_maxpassbeforemerge_set, 0},
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_config.h
 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_config.h
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_config.h
   2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_config.h
   2025-10-07 21:13:14.000000000 +0200
@@ -60,6 +60,7 @@
 #define CONFIG_RANGELOOKTHROUGHLIMIT "nsslapd-rangelookthroughlimit"
 #define CONFIG_PAGEDLOOKTHROUGHLIMIT "nsslapd-pagedlookthroughlimit"
 #define CONFIG_IDLISTSCANLIMIT "nsslapd-idlistscanlimit"
+#define CONFIG_SYSTEMIDLISTSCANLIMIT "nsslapd-systemidlistscanlimit"
 #define CONFIG_PAGEDIDLISTSCANLIMIT "nsslapd-pagedidlistscanlimit"
 #define CONFIG_DIRECTORY "nsslapd-directory"
 #define CONFIG_MODE "nsslapd-mode"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
     2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/back-ldbm/ldbm_index_config.c
     2025-10-07 21:13:14.000000000 +0200
@@ -384,6 +384,14 @@
         }
     }
 
+    /* get nsIndexIDListScanLimit and its values, and add them */
+    if (0 == slapi_entry_attr_find(e, "nsIndexIDListScanLimit", &attr)) {
+        for (j = slapi_attr_first_value(attr, &sval); j != -1; j = 
slapi_attr_next_value(attr, j, &sval)) {
+            attrValue = slapi_value_get_berval(sval);
+            eBuf = PR_sprintf_append(eBuf, "nsIndexIDListScanLimit: %s\n", 
attrValue->bv_val);
+        }
+    }
+
     ldbm_config_add_dse_entry(li, eBuf, flags);
     if (eBuf) {
         PR_smprintf_free(eBuf);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/log.c 
new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/log.c
--- old/389-ds-base-3.1.3~git106.bea5091e3/ldap/servers/slapd/log.c     
2025-10-02 22:05:50.000000000 +0200
+++ new/389-ds-base-3.1.3~git111.e953ee704/ldap/servers/slapd/log.c     
2025-10-07 21:13:14.000000000 +0200
@@ -193,8 +193,12 @@
      * compressed content.
      */
     if ((fd = open(gzip_log, O_WRONLY|O_CREAT|O_TRUNC, mode)) >= 0) {
-        /* FIle successfully created, now pass the FD to gzdopen() */
+        /* File successfully created, now pass the FD to gzdopen() */
         outfile = gzdopen(fd, "ab");
+        if (outfile == NULL) {
+            close(fd);
+            return -1;
+        }
     } else {
         return -1;
     }
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/LDAPEditor.jsx
 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/LDAPEditor.jsx
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/LDAPEditor.jsx
   2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/LDAPEditor.jsx
   2025-10-07 21:13:14.000000000 +0200
@@ -371,6 +371,14 @@
     }
 
     componentDidUpdate(prevProps) {
+        if (this.props.serverId !== prevProps.serverId) {
+            getAllObjectClasses(this.props.serverId, (ocs) => {
+                this.setState({
+                    allObjectclasses: ocs,
+                }, () => { this.getAttributes(this.showSuffixes) });
+            });
+        }
+
         if (this.props.wasActiveList.includes(7)) {
             if (this.state.firstLoad) {
                 this.handleReload(true);
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/database.jsx 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/database.jsx
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/database.jsx 
    2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/database.jsx 
    2025-10-07 21:13:14.000000000 +0200
@@ -303,8 +303,6 @@
                         if ('nsslapd-directory' in attrs) {
                             dbhome = attrs['nsslapd-directory'][0];
                         }
-                        console.log("MARK loaded attrs: ",attrs);
-
 
                         if (attrs['nsslapd-cache-autosize'][0] !== "-1") {
                             db_cache_auto = true;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/monitor.jsx 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/monitor.jsx
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/src/cockpit/389-console/src/monitor.jsx  
    2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/src/cockpit/389-console/src/monitor.jsx  
    2025-10-07 21:13:14.000000000 +0200
@@ -131,6 +131,7 @@
             } else {
                 if (this.props.serverId !== prevProps.serverId) {
                     this.loadSuffixTree(false);
+                    this.getDBEngine();
                 }
             }
         }
@@ -643,10 +644,7 @@
                 })
                 .fail(err => {
                     const errMsg = JSON.parse(err);
-                    this.props.addNotification(
-                        "error",
-                        cockpit.format("Error detecting DB implementation type 
- $0", errMsg.desc)
-                    );
+                    console.log("getDBEngine - Error detecting DB 
implementation type -", errMsg.desc);
                 });
     }
 
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/src/lib389/lib389/backend.py 
new/389-ds-base-3.1.3~git111.e953ee704/src/lib389/lib389/backend.py
--- old/389-ds-base-3.1.3~git106.bea5091e3/src/lib389/lib389/backend.py 
2025-10-02 22:05:50.000000000 +0200
+++ new/389-ds-base-3.1.3~git111.e953ee704/src/lib389/lib389/backend.py 
2025-10-07 21:13:14.000000000 +0200
@@ -617,8 +617,8 @@
         # Default system indexes taken from 
ldap/servers/slapd/back-ldbm/instance.c
         expected_system_indexes = {
             'entryrdn': {'types': ['subtree'], 'matching_rule': None},
-            'parentId': {'types': ['eq'], 'matching_rule': 
'integerOrderingMatch'},
-            'ancestorId': {'types': ['eq'], 'matching_rule': 
'integerOrderingMatch'},
+            'parentId': {'types': ['eq'], 'matching_rule': 
'integerOrderingMatch', 'scanlimit': 'limit=5000 type=eq flags=AND'},
+            'ancestorId': {'types': ['eq'], 'matching_rule': 
'integerOrderingMatch', 'scanlimit': 'limit=5000 type=eq flags=AND'},
             'objectClass': {'types': ['eq'], 'matching_rule': None},
             'aci': {'types': ['pres'], 'matching_rule': None},
             'nscpEntryDN': {'types': ['eq'], 'matching_rule': None},
@@ -668,12 +668,15 @@
                     cmd = f"dsconf YOUR_INSTANCE backend index add {bename} 
--attr {attr_name} {index_types}"
                     if expected_config['matching_rule']:
                         cmd += f" --add-mr {expected_config['matching_rule']}"
+                    if expected_config['scanlimit']:
+                        cmd += f" --add-scanlimit 
{expected_config['scanlimit']}"
                     remediation_commands.append(cmd)
                     reindex_attrs.add(attr_name)  # New index needs reindexing
                 else:
                     # Index exists, check configuration
                     actual_types = index.get_attr_vals_utf8('nsIndexType') or 
[]
                     actual_mrs = index.get_attr_vals_utf8('nsMatchingRule') or 
[]
+                    actual_scanlimit = 
index.get_attr_vals_utf8('nsIndexIDListScanLimit') or []
 
                     # Normalize to lowercase for comparison
                     actual_types = [t.lower() for t in actual_types]
@@ -699,6 +702,19 @@
                             remediation_commands.append(cmd)
                             reindex_attrs.add(attr_name)
 
+                    # Check fine grain definitions for parentid ONLY
+                    expected_scanlimit = expected_config['scanlimit']
+                    if (attr_name.lower() == "parentid") and 
expected_scanlimit and (len(actual_scanlimit) == 0):
+                            discrepancies.append(f"Index {attr_name} missing 
fine grain definition of IDs limit: {expected_mr}")
+                            # Add the missing scanlimit
+                            if expected_mr:
+                                cmd = f"dsconf YOUR_INSTANCE backend index set 
{bename} --attr {attr_name} --add-mr {expected_mr} --add-scanlimit 
{expected_scanlimit}"
+                            else:
+                                cmd = f"dsconf YOUR_INSTANCE backend index set 
{bename} --attr {attr_name} --add-scanlimit {expected_scanlimit}"
+                            remediation_commands.append(cmd)
+                            reindex_attrs.add(attr_name)
+
+
             except Exception as e:
                 self._log.debug(f"_lint_system_indexes - Error checking index 
{attr_name}: {e}")
                 discrepancies.append(f"Unable to check index {attr_name}: 
{str(e)}")
@@ -936,12 +952,13 @@
                 return
         raise ValueError("Can not delete index because it does not exist")
 
-    def add_index(self, attr_name, types, matching_rules=None, reindex=False):
+    def add_index(self, attr_name, types, matching_rules=None, 
idlistscanlimit=None, reindex=False):
         """ Add an index.
 
         :param attr_name - name of the attribute to index
         :param types - a List of index types(eq, pres, sub, approx)
         :param matching_rules - a List of matching rules for the index
+        :param idlistscanlimit - a List of fine grain definitions for scanning 
limit
         :param reindex - If set to True then index the attribute after 
creating it.
         """
 
@@ -971,6 +988,15 @@
             # Only add if there are actually rules present in the list.
             if len(mrs) > 0:
                 props['nsMatchingRule'] = mrs
+
+        if idlistscanlimit is not None:
+            scanlimits = []
+            for scanlimit  in idlistscanlimit:
+                scanlimits.append(scanlimit)
+            # Only add if there are actually limits in the list.
+            if len(scanlimits) > 0:
+                props['nsIndexIDListScanLimit'] = mrs
+
         new_index.create(properties=props, basedn="cn=index," + self._dn)
 
         if reindex:
@@ -1277,6 +1303,7 @@
             'nsslapd-lookthroughlimit',
             'nsslapd-mode',
             'nsslapd-idlistscanlimit',
+            'nsslapd-systemidlistscanlimit',
             'nsslapd-directory',
             'nsslapd-import-cachesize',
             'nsslapd-idl-switch',
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' 
'--exclude=.svnignore' 
old/389-ds-base-3.1.3~git106.bea5091e3/src/lib389/lib389/cli_conf/backend.py 
new/389-ds-base-3.1.3~git111.e953ee704/src/lib389/lib389/cli_conf/backend.py
--- 
old/389-ds-base-3.1.3~git106.bea5091e3/src/lib389/lib389/cli_conf/backend.py    
    2025-10-02 22:05:50.000000000 +0200
+++ 
new/389-ds-base-3.1.3~git111.e953ee704/src/lib389/lib389/cli_conf/backend.py    
    2025-10-07 21:13:14.000000000 +0200
@@ -39,6 +39,7 @@
         'mode': 'nsslapd-mode',
         'state': 'nsslapd-state',
         'idlistscanlimit': 'nsslapd-idlistscanlimit',
+        'systemidlistscanlimit': 'nsslapd-systemidlistscanlimit',
         'directory': 'nsslapd-directory',
         'dbcachesize': 'nsslapd-dbcachesize',
         'logdirectory': 'nsslapd-db-logdirectory',
@@ -604,6 +605,21 @@
             except ldap.NO_SUCH_ATTRIBUTE:
                 raise ValueError('Can not delete matching rule type because it 
does not exist')
 
+    if args.replace_scanlimit is not None:
+        for replace_scanlimit in args.replace_scanlimit:
+            index.replace('nsIndexIDListScanLimit', replace_scanlimit)
+
+    if args.add_scanlimit is not None:
+        for add_scanlimit in args.add_scanlimit:
+            index.add('nsIndexIDListScanLimit', add_scanlimit)
+
+    if args.del_scanlimit is not None:
+        for del_scanlimit in args.del_scanlimit:
+            try:
+                index.remove('nsIndexIDListScanLimit', del_scanlimit)
+            except ldap.NO_SUCH_ATTRIBUTE:
+                raise ValueError('Can not delete a fine grain limit definition 
because it does not exist')
+
     if args.reindex:
         be.reindex(attrs=[args.attr])
     log.info("Index successfully updated")
@@ -925,6 +941,9 @@
     edit_index_parser.add_argument('--del-type', action='append', 
help='Removes an index type from the index: (eq, sub, pres, or approx)')
     edit_index_parser.add_argument('--add-mr', action='append', help='Adds a 
matching-rule to the index')
     edit_index_parser.add_argument('--del-mr', action='append', help='Removes 
a matching-rule from the index')
+    edit_index_parser.add_argument('--add-scanlimit', action='append', 
help='Adds a fine grain limit definiton to the index')
+    edit_index_parser.add_argument('--replace-scanlimit', action='append', 
help='Replaces a fine grain limit definiton to the index')
+    edit_index_parser.add_argument('--del-scanlimit', action='append', 
help='Removes a fine grain limit definiton to the index')
     edit_index_parser.add_argument('--reindex', action='store_true', 
help='Re-indexes the database after editing the index')
     edit_index_parser.add_argument('be_name', help='The backend name or 
suffix')
 
@@ -1051,6 +1070,7 @@
                                                                  'will check 
when examining candidate entries in response to a search request')
     set_db_config_parser.add_argument('--mode', help='Specifies the 
permissions used for newly created index files')
     set_db_config_parser.add_argument('--idlistscanlimit', help='Specifies the 
number of entry IDs that are searched during a search operation')
+    set_db_config_parser.add_argument('--systemidlistscanlimit', 
help='Specifies the number of entry IDs that are fetch from ancestorid/parentid 
indexes')
     set_db_config_parser.add_argument('--directory', help='Specifies absolute 
path to database instance')
     set_db_config_parser.add_argument('--dbcachesize', help='Specifies the 
database index cache size in bytes')
     set_db_config_parser.add_argument('--logdirectory', help='Specifies the 
path to the directory that contains the database transaction logs')

++++++ 389-ds-base.obsinfo ++++++
--- /var/tmp/diff_new_pack.W8bOqE/_old  2025-10-08 18:14:47.471083215 +0200
+++ /var/tmp/diff_new_pack.W8bOqE/_new  2025-10-08 18:14:47.475083383 +0200
@@ -1,5 +1,5 @@
 name: 389-ds-base
-version: 3.1.3~git106.bea5091e3
-mtime: 1759435550
-commit: bea5091e30ca7e52b64257ab44bd0561c9598f2f
+version: 3.1.3~git111.e953ee704
+mtime: 1759864394
+commit: e953ee704cc204b8a3e6498412730162878b52b2
 

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.W8bOqE/_old  2025-10-08 18:14:47.639090266 +0200
+++ /var/tmp/diff_new_pack.W8bOqE/_new  2025-10-08 18:14:47.643090434 +0200
@@ -1,6 +1,6 @@
 <servicedata>
 <service name="tar_scm">
                 <param 
name="url">https://github.com/389ds/389-ds-base.git</param>
-              <param 
name="changesrevision">bea5091e30ca7e52b64257ab44bd0561c9598f2f</param></service></servicedata>
+              <param 
name="changesrevision">e953ee704cc204b8a3e6498412730162878b52b2</param></service></servicedata>
 (No newline at EOF)
 

++++++ vendor.tar.zst ++++++
/work/SRC/openSUSE:Factory/389-ds/vendor.tar.zst 
/work/SRC/openSUSE:Factory/.389-ds.new.11973/vendor.tar.zst differ: char 7, 
line 1

Reply via email to