Similar to what we did for uprobe_register_batch(), split
uprobe_unregister_batch() into two separate phases with different
locking needs.

First, all the VMA unregistration is performed while holding
a per-uprobe register_rwsem.

Then, we take uprobes_treelock once, in a batched fashion, to call
__put_uprobe() for all uprobe_consumers. The uprobe_consumer->uprobe
back-pointer lets the second pass find each uprobe without another
tree lookup, which is what makes this batching possible.

Signed-off-by: Andrii Nakryiko <and...@kernel.org>
---
 kernel/events/uprobes.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 416f408cbed9..7e94671a672a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1162,8 +1162,8 @@ __uprobe_unregister(struct uprobe *uprobe, struct 
uprobe_consumer *uc)
  */
 void uprobe_unregister_batch(struct inode *inode, int cnt, uprobe_consumer_fn 
get_uprobe_consumer, void *ctx)
 {
-       struct uprobe *uprobe;
        struct uprobe_consumer *uc;
+       struct uprobe *uprobe;
        int i;
 
        for (i = 0; i < cnt; i++) {
@@ -1176,10 +1176,20 @@ void uprobe_unregister_batch(struct inode *inode, int 
cnt, uprobe_consumer_fn ge
                down_write(&uprobe->register_rwsem);
                __uprobe_unregister(uprobe, uc);
                up_write(&uprobe->register_rwsem);
-               put_uprobe(uprobe);
+       }
 
+       write_lock(&uprobes_treelock);
+       for (i = 0; i < cnt; i++) {
+               uc = get_uprobe_consumer(i, ctx);
+               uprobe = uc->uprobe;
+
+               if (!uprobe)
+                       continue;
+
+               __put_uprobe(uprobe, true);
                uc->uprobe = NULL;
        }
+       write_unlock(&uprobes_treelock);
 }
 
 static struct uprobe_consumer *uprobe_consumer_identity(size_t idx, void *ctx)
-- 
2.43.0


Reply via email to