Hi VPP Experts,
Could you please help me with the VPP hang issue described below, which occurs
while running the following configuration?

Developed App using VPP APIs for the below commands:

set int mpls VirtualFuncEthernet0/8/0.1800 enable

ip table add 2

ip route add table 2 1.1.1.1/32 via 107.243.21.116 
VirtualFuncEthernet0/8/0.1800 out-labels 18

Added a BFD session for 107.243.21.116 on VirtualFuncEthernet0/8/0.1800.

I am seeing the backtrace below repeating in a loop. It keeps walking through
the outer context and picking up the same fib_entries every time.

Thread 1 "vpp_main" hit Breakpoint 1, fib_entry_src_action_reactivate 
(fib_entry=fib_entry@entry=0x7f4c29792060, source=FIB_SOURCE_API) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_entry_src.c:1198

1198 esrc = fib_entry_src_find(fib_entry, source);

$1 = {fe_node = {fn_type = 0x2, fn_pad = 0x0, fn_children = 0xffffffff, 
fn_locks = 0x2}, fe_prefix = {fp_len = 0x10, fp_proto = 0x0, ___fp___pad = 0x0, 
{fp_addr = {{pad = {0x0, 0x0, 0x0}, ip4 = {data = {0xb, 0xd, 0x0, 0x0}, 
data_u32 = 0xd0b, as_u8 = {0xb, 0xd, 0x0, 0x0}, as_u16 = {0xd0b, 0x0}, as_u32 = 
0xd0b}}, ip6 = {as_u8 = {0x0 <repeats 12 times>, 0xb, 0xd, 0x0, 0x0}, as_u16 = 
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xd0b, 0x0}, as_u32 = {0x0, 0x0, 0x0, 0xd0b}, 
as_u64 = {0x0, 0xd0b00000000}, as_u128 = {0x0, 0xd0b00000000}, as_uword = {0x0, 
0xd0b00000000}}, as_u8 = {0x0 <repeats 12 times>, 0xb, 0xd, 0x0, 0x0}, as_u64 = 
{0x0, 0xd0b00000000}}, {fp_label = 0x0, fp_eos = 0x0, fp_payload_proto = 
0x0}}}, fe_fib_index = 0x2, fe_lb = {{{dpoi_type = 0x4, dpoi_proto = 0x0, 
dpoi_next_node = 0x0, dpoi_index = 0x172}, as_u64 = 0x17200000004}}, fe_srcs = 
0x7f4c2b0a2990, fe_parent = 0x16f, fe_sibling = 0x349, fe_delegates = 0x0}

#0  fib_entry_src_action_reactivate (fib_entry=fib_entry@entry=0x7f4c29792060, 
source=FIB_SOURCE_API) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_entry_src.c:1198

#1  0x00007f4d540d1fe3 in fib_entry_back_walk_notify (node=0x7f4c29792060, 
ctx=0x7f4bcb3aac00) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_entry.c:316

#2  0x00007f4d540c82eb in fib_walk_advance (fwi=fwi@entry=1) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_walk.c:368

#3  0x00007f4d540c8f67 in fib_walk_sync (parent_type=<optimized out>, 
parent_index=<optimized out>, ctx=0x7f4bcb3aacd0) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_walk.c:792

#4  0x00007f4d540e0437 in fib_path_back_walk_notify (node=<optimized out>, 
ctx=0x7f4bcb3aacd0) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_path.c:1214

#5  0x00007f4d540c82eb in fib_walk_advance (fwi=fwi@entry=0) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_walk.c:368

#6  0x00007f4d540c8f67 in fib_walk_sync 
(parent_type=parent_type@entry=FIB_NODE_TYPE_ADJ, 
parent_index=parent_index@entry=155, ctx=ctx@entry=0x7f4bcb3aad78) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/fib/fib_walk.c:792

#7  0x00007f4d541059d3 in adj_bfd_notify (event=<optimized out>, 
session=0x7f4c29448478) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/adj/adj_bfd.c:184

#8  0x00007f4d53b5fa17 in bfd_notify_listeners (bm=0x7f4d55120648 <bfd_main>, 
event=BFD_LISTEN_EVENT_UPDATE, bs=0x7f4c29448478) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/bfd/bfd_main.c:448

#9  bfd_rpc_notify_listeners_cb (a=<optimized out>) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vnet/bfd/bfd_main.c:615

#10 0x00007f4d551680a3 in vl_api_rpc_call_t_handler (mp=0x1300b1950) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlibmemory/vlib_api.c:532

#11 0x00007f4d5517842c in vl_msg_api_handler_with_vm_node 
(am=am@entry=0x7f4d5518fc98 <api_global_main>, vlib_rp=0x13002d000, 
the_msg=0x1300b1950, vm=<optimized out>, vm@entry=0x7f4bd2f35680, 
node=node@entry=0x7f4bd3e16240, is_private=<optimized out>, is_private@entry=0 
'\000') at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlibapi/api_shared.c:635

#12 0x00007f4d55145bc6 in vl_mem_api_handle_rpc (vm=vm@entry=0x7f4bd2f35680, 
node=node@entry=0x7f4bd3e16240) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlibmemory/memory_api.c:746

#13 0x00007f4d5515a060 in vl_api_clnt_process (vm=<optimized out>, 
node=<optimized out>, f=<optimized out>) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlibmemory/vlib_api.c:338

#14 0x00007f4d534fdd07 in vlib_process_bootstrap (_a=<optimized out>) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlib/main.c:1339

#15 0x00007f4d5343056c in clib_calljmp () at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vppinfra/longjmp.S:123

#16 0x00007f4bcd6efd70 in ?? ()

#17 0x00007f4d534f3d8e in vlib_process_startup (vm=0x7f4bcb3ac000, 
p=0x20737365636f7270, f=0x0) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlib/main.c:1364

#18 dispatch_process (vm=<optimized out>, p=<optimized out>, f=0x0, 
last_time_stamp=<optimized out>) at 
/usr/src/debug/vpp-21.06.0-3~g05567d9c3_dirty.x86_64/src/vlib/main.c:1420

#19 0x0000000000000000 in ?? ()

Could it be that we are passing some wrong parameters to "ip route add", which
then triggers this loop during a BFD event change?

Everything works fine if we disable BFD.

With Regards
Sastry
-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.
View/Reply Online (#21027): https://lists.fd.io/g/vpp-dev/message/21027
Mute This Topic: https://lists.fd.io/mt/89811914/21656
Mute #mpls:https://lists.fd.io/g/vpp-dev/mutehashtag/mpls
Group Owner: vpp-dev+ow...@lists.fd.io
Unsubscribe: https://lists.fd.io/g/vpp-dev/unsub [arch...@mail-archive.com]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to