(gdb) bt full
#0 __GI___libc_free (mem=0x41) at malloc.c:2929
ar_ptr = <optimized out>
p = <optimized out>
hook = 0x0
#1 0x00007f87f6fca24c in ber_memvfree_x (vec=0x7f876c00a900, ctx=0x0) at
memory.c:180
i = <optimized out>
#2 0x00007f87f71f2a11 in ldap_mods_free (mods=0x7f876c001fb0, freemods=1) at
free.c:94
i = <optimized out>
#3 0x00007f87f804a6a0 in do_modify (pb=pb@entry=0x7f87b4ff0a90) at
ldap/servers/slapd/modify.c:390
operation = 0x7f87f931bf80
smods = {mods = 0x0, num_elements = 0, num_mods = 0, iterator = 0,
free_mods = 0}
ber = <optimized out>
tag = <optimized out>
len = 18446744073709551615
normalized_mods = 0x7f876c001fb0
mod = 0x0
mods = 0x7f876c00c200
last = 0x7f876c000e23 ""
type = 0x0
old_pw = 0x0
rawdn = 0x7f876c000920
"cn=svcaccount,cn=groups,cn=accounts,dc=MYDOMAIN"
minssf_exclude_rootdse = <optimized out>
ignored_some_mods = <optimized out>
has_password_mod = <optimized out>
pw_change = 0
err = <optimized out>
#4 0x00007f87f85339e0 in connection_dispatch_operation (pb=0x7f87b4ff0a90,
op=0x7f87f931bf80, conn=0x7f87d82d0768) at ldap/servers/slapd/connection.c:627
minssf = 0
minssf_exclude_rootdse = <optimized out>
---Type <return> to continue, or q <return> to quit---
enable_nagle = 1
pop_cork = 0
#5 connection_threadmain () at ldap/servers/slapd/connection.c:1759
is_timedout = 0
curtime = <optimized out>
local_pb = {pb_backend = 0x7f87f8e1a070, pb_conn = 0x7f87d82d0768,
pb_op = 0x7f87f931bf80, pb_plugin = 0x7f87f8c85c50, pb_opreturn = -1, pb_object
= 0x0, pb_destroy_fn = 0x0,
pb_requestor_isroot = 0, pb_config_fname = 0x0, pb_config_lineno =
0, pb_config_argc = 0, pb_config_argv = 0x0, plugin_tracking = 0,
pb_target_entry = 0x0,
pb_existing_dn_entry = 0x7f876c00e880, pb_existing_uniqueid_entry =
0x0, pb_parent_entry = 0x0, pb_newparent_entry = 0x0, pb_pre_op_entry = 0x0,
pb_post_op_entry = 0x0,
pb_seq_type = 0, pb_seq_attrname = 0x0, pb_seq_val = 0x0,
pb_dbverify_dbdir = 0x0, pb_ldif_file = 0x0, pb_removedupvals = 0,
pb_db2index_attrs = 0x0,
pb_ldif2db_noattrindexes = 0, pb_ldif_printkey = 0, pb_instance_name
= 0x0, pb_task = 0x0, pb_task_flags = 0, pb_mr_filter_match_fn = 0x0,
pb_mr_filter_index_fn = 0x0,
pb_mr_filter_reset_fn = 0x0, pb_mr_index_fn = 0x0, pb_mr_oid = 0x0,
pb_mr_type = 0x0, pb_mr_value = 0x0, pb_mr_values = 0x0, pb_mr_keys = 0x0,
pb_mr_filter_reusable = 0,
pb_mr_query_operator = 0, pb_mr_usage = 0,
pb_pwd_storage_scheme_user_passwd = 0x0, pb_pwd_storage_scheme_db_passwd = 0x0,
pb_managedsait = 0, pb_internal_op_result = 0,
pb_plugin_internal_search_op_entries = 0x0,
pb_plugin_internal_search_op_referrals = 0x0, pb_plugin_identity = 0x0,
pb_plugin_config_area = 0x0, pb_parent_txn = 0x0,
pb_txn = 0x0, pb_txn_ruv_mods_fn = 0x7f87ea323470
<replica_ruv_smods_for_op>, pb_dbsize = 0, pb_ldif_files = 0x0, pb_ldif_include
= 0x0, pb_ldif_exclude = 0x0,
pb_ldif_dump_replica = 0, pb_ldif_dump_uniqueid = 0,
pb_ldif_generate_uniqueid = 0, pb_ldif_namespaceid = 0x0, pb_ldif_encrypt = 0,
pb_operation_notes = 0, pb_slapd_argc = 0,
pb_slapd_argv = 0x0, pb_slapd_configdir = 0x0, pb_ctrls_arg = 0x0,
pb_dse_dont_add_write = 0, pb_dse_add_merge = 0, pb_dse_dont_check_dups = 0,
pb_dse_is_primary_file = 0,
pb_schema_flags = 0, pb_result_code = 0, pb_result_text = 0x0,
pb_result_matched = 0x0, pb_nentries = 0, urls = 0x0, pb_import_entry = 0x0,
pb_import_state = 0,
pb_destroy_content = 0, pb_dse_reapply_mods = 0,
pb_urp_naming_collision_dn = 0x0, pb_urp_tombstone_uniqueid = 0x0,
pb_server_running = 0, pb_backend_count = 1,
pb_pwpolicy_ctrl = 0, pb_vattr_context = 0x0, pb_substrlens = 0x0,
pb_plugin_enabled = 0, pb_search_ctrls = 0x0, pb_mr_index_sv_fn = 0x0,
pb_syntax_filter_normalized = 0,
pb_syntax_filter_data = 0x0, pb_paged_results_index = 0,
pb_paged_results_cookie = 0, pwdpolicy = 0x0, op_stack_elem = 0x7f87f8e24d30,
pb_aci_target_check = 0,
pb_pw_entry = 0x0}
pb = 0x7f87b4ff0a90
conn = 0x7f87d82d0768
op = 0x7f87f931bf80
tag = 102
need_wakeup = 0
thread_turbo_flag = <optimized out>
ret = <optimized out>
more_data = 0
---Type <return> to continue, or q <return> to quit---
replication_connection = 0
doshutdown = 0
maxthreads = 5
enable_nunc_stans = 0
bypasspollcnt = <optimized out>
#6 0x00007f87f62299bb in _pt_root () from /lib64/libnspr4.so
No symbol table info available.
#7 0x00007f87f5bc9dc5 in start_thread (arg=0x7f87b4ff1700) at
pthread_create.c:308
__res = <optimized out>
pd = 0x7f87b4ff1700
now = <optimized out>
unwind_buf = {cancel_jmp_buf = {{jmp_buf = {140220833928960,
-5233892399363934943, 0, 140220833929664, 140220833928960, 1,
5211249063945286945, 5211388174174106913},
mask_was_saved = 0}}, priv = {pad = {0x0, 0x0, 0x0, 0x0}, data =
{prev = 0x0, cleanup = 0x0, canceltype = 0}}}
not_first_call = <optimized out>
pagesize_m1 = <optimized out>
sp = <optimized out>
freesize = <optimized out>
#8 0x00007f87f58f873d in clone () at
../sysdeps/unix/sysv/linux/x86_64/clone.S:113
No locals.
I performed an ‘ipactl restart’ on the affected server and attempted
again with the same issue.
I tried adding a non-posix group and it was successful.
I found the dirsrv logs and see the error ‘dna-plugin - dna_pre_op: no
more values available!!’ which led me to
https://www.redhat.com/archives/freeipa-users/2014-February/msg00247.html
Performing the ldapsearch I see:
dnaMaxValue is 1100
dnaNextValue is 1101
dnaThreshold is 500
Right. A master only gets a range when it needs one. In this case it needed
one after the master holding the entire range went away.
I also did ‘ipa idrange-find’, which shows:
---------------
1 range matched
---------------
Range name: MYDOMAIN.COM_id_range
First Posix ID of the range: 1946000000
Number of IDs in the range: 200000
Range type: local domain range
----------------------------
Number of entries returned 1
----------------------------
So now my question is what do I need to change to fix the issue?
I can do the ldapmodify to adjust the dnaMaxValue, but I don’t know
what I should be adjusting the idrange to?
I’d like to keep the idrange the same and just adjust the dnaMaxValue,
so would I need to change dnaMaxValue to 200000?
See https://blog-rcritten.rhcloud.com/?p=50
rob
Thank you.
Setting the id ranges manually fixed my problem.