When setting the TIPC node address, the following lockdep warning is triggered:

============================================
WARNING: possible recursive locking detected
4.16.0-rc4-WR7.0.0.21_standard+ #30 Not tainted
--------------------------------------------
tipc-config/391 is trying to acquire lock:
 (rtnl_mutex){+.+.}, at: [<00000000b27b1ec4>] rtnl_lock+0x17/0x20

but task is already holding lock:
 (rtnl_mutex){+.+.}, at: [<00000000b27b1ec4>] rtnl_lock+0x17/0x20

other info that might help us debug this:
 Possible unsafe locking scenario:

       CPU0
       ----
  lock(rtnl_mutex);
  lock(rtnl_mutex);

 *** DEADLOCK ***

 May be due to missing lock nesting notation

3 locks held by tipc-config/391:
 #0:  (cb_lock){++++}, at: [<00000000cb65ad12>] genl_rcv+0x19/0x40
 #1:  (genl_mutex){+.+.}, at: [<000000000bcc4d5b>] genl_rcv_msg+0x7f/0x90
 #2:  (rtnl_mutex){+.+.}, at: [<00000000b27b1ec4>] rtnl_lock+0x17/0x20

stack backtrace:
CPU: 7 PID: 391 Comm: tipc-config Not tainted 4.16.0-rc4-WR7.0.0.21_standard+ #30
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu1 04/01/2014
Call Trace:
 dump_stack+0x67/0x97
 __lock_acquire+0xf70/0x1880
 ? add_lock_to_list.isra.9+0x80/0xf0
 ? __lock_acquire+0x1420/0x1880
 lock_acquire+0xa3/0x1f0
 ? lock_acquire+0xa3/0x1f0
 ? rtnl_lock+0x17/0x20
 ? rtnl_lock+0x17/0x20
 __mutex_lock+0x7f/0x980
 ? rtnl_lock+0x17/0x20
 ? rtnl_lock+0x17/0x20
 ? rtnl_lock+0x17/0x20
 ? lock_acquire+0xa3/0x1f0
 mutex_lock_nested+0x1b/0x20
 ? mutex_lock_nested+0x1b/0x20
 rtnl_lock+0x17/0x20
 __tipc_nl_net_set+0x1e9/0x380
 ? __nla_put+0x20/0x30
 tipc_nl_compat_doit+0x17e/0x190
 tipc_nl_compat_recv+0x392/0x540
 ? tipc_nl_net_dump+0x2e0/0x2e0
 ? tipc_nl_compat_bearer_dump+0x70/0x70
 genl_family_rcv_msg+0x1ca/0x3c0
 genl_rcv_msg+0x4c/0x90
 ? genl_rcv+0x19/0x40
 ? genl_family_rcv_msg+0x3c0/0x3c0
 netlink_rcv_skb+0xcb/0xf0
 genl_rcv+0x28/0x40
 netlink_unicast+0x163/0x210
 netlink_sendmsg+0x2c2/0x390
 sock_sendmsg+0x1d/0x30
 sock_write_iter+0x7b/0xd0
 __vfs_write+0xcf/0x140
 vfs_write+0xc6/0x1a0
 SyS_write+0x49/0xb0
 do_syscall_64+0x75/0x1c0
 entry_SYSCALL_64_after_hwframe+0x42/0xb7
RIP: 0033:0x3a85cdaae0
RSP: 002b:00007ffdd596e0a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
RAX: ffffffffffffffda RBX: 0000000000000024 RCX: 0000003a85cdaae0
RDX: 0000000000000024 RSI: 0000000000616010 RDI: 0000000000000003
RBP: 0000000000616010 R08: 0000000000000004 R09: 0000000000000000
R10: 00007ffdd596e0dc R11: 0000000000000246 R12: 0000000000000024
R13: 0000000000000003 R14: 0000000000616010 R15: 0000000000000008

Below is the deadlock scenario:

__tipc_nl_compat_doit()
   rtnl_lock()
   __tipc_nl_net_set()
      rtnl_lock() ---------> DEADLOCK!
         tipc_net_init()

As __tipc_nl_net_set() is always called with the RTNL lock already
held, drop the redundant rtnl_lock()/rtnl_unlock() pair from its
internal code.
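
For reference, both paths into the handler already hold RTNL before
__tipc_nl_net_set() runs. A simplified sketch of the two callers
(abridged from net/tipc/net.c and net/tipc/netlink_compat.c; error
handling trimmed, so illustrative rather than verbatim):

/* Regular netlink path: the locked wrapper takes RTNL and delegates
 * to the unlocked handler.
 */
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
        int err;

        rtnl_lock();
        err = __tipc_nl_net_set(skb, info);
        rtnl_unlock();

        return err;
}

/* Compat path: cmd->doit points at __tipc_nl_net_set() for
 * TIPC_CMD_SET_NODE_ADDR and is likewise invoked under RTNL.
 */
static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
                                 struct tipc_nl_compat_msg *msg)
{
        ...
        rtnl_lock();
        err = (*cmd->doit)(doit_buf, &info);
        rtnl_unlock();
        ...
}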

Fixes: ed4ffdfec26d ("tipc: Fix missing RTNL lock protection during setting link properties")
Signed-off-by: Ying Xue <[email protected]>
---
 net/tipc/net.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/net/tipc/net.c b/net/tipc/net.c
index 23ea4ab..a0d49b4 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -243,9 +243,7 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
                addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
                if (!addr)
                        return -EINVAL;
-               rtnl_lock();
                tipc_net_init(net, NULL, addr);
-               rtnl_unlock();
        }
 
        if (attrs[TIPC_NLA_NET_NODEID]) {
@@ -255,9 +253,7 @@ int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
 
                *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
                *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
-               rtnl_lock();
                tipc_net_init(net, node_id, 0);
-               rtnl_unlock();
        }
        return 0;
 }
-- 
2.7.4

