Add three functions for atomic lifecycle management of watchpoints:
- ksw_watch_get(): Acquires a watchpoint from the free llist.
- ksw_watch_on(): Enables the watchpoint on all online CPUs.
- ksw_watch_off(): Disables the watchpoint and returns it to the free llist.

For cross-CPU synchronization, a watchpoint update is applied directly on
the local CPU and propagated to remote CPUs via asynchronous IPIs.
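A minimal usage sketch of the lifecycle (the caller below is hypothetical
and not part of this patch; example_guard() and slot are illustrative
names only):

  #include <linux/kstackwatch.h>

  /* hypothetical caller, for illustration only */
  static int example_guard(ulong *slot)
  {
          struct ksw_watchpoint *wp;
          int ret;

          /* take a free watchpoint off the llist */
          ret = ksw_watch_get(&wp);
          if (ret)
                  return ret;     /* -EBUSY: none available */

          /* arm it; remote CPUs are updated via asynchronous IPIs */
          ret = ksw_watch_on(wp, (ulong)slot, sizeof(*slot));
          if (ret) {
                  /* release the watchpoint again on failure */
                  ksw_watch_off(wp);
                  return ret;
          }

          /* ... window during which writes to *slot are watched ... */

          /* disarm and return the watchpoint to the free llist */
          return ksw_watch_off(wp);
  }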

Signed-off-by: Jinchao Wang <[email protected]>
---
 include/linux/kstackwatch.h |  4 ++
 mm/kstackwatch/watch.c      | 85 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/include/linux/kstackwatch.h b/include/linux/kstackwatch.h
index eb9f2b4f2109..d7ea89c8c6af 100644
--- a/include/linux/kstackwatch.h
+++ b/include/linux/kstackwatch.h
@@ -44,11 +44,15 @@ const struct ksw_config *ksw_get_config(void);
 /* watch management */
 struct ksw_watchpoint {
        struct perf_event *__percpu *event;
+       call_single_data_t __percpu *csd;
        struct perf_event_attr attr;
        struct llist_node node; // for atomic watch_on and off
        struct list_head list; // for cpu online and offline
 };
 int ksw_watch_init(void);
 void ksw_watch_exit(void);
+int ksw_watch_get(struct ksw_watchpoint **out_wp);
+int ksw_watch_on(struct ksw_watchpoint *wp, ulong watch_addr, u16 watch_len);
+int ksw_watch_off(struct ksw_watchpoint *wp);
 
 #endif /* _KSTACKWATCH_H */
diff --git a/mm/kstackwatch/watch.c b/mm/kstackwatch/watch.c
index 4947eac32c61..3817a172dc25 100644
--- a/mm/kstackwatch/watch.c
+++ b/mm/kstackwatch/watch.c
@@ -27,11 +27,83 @@ static void ksw_watch_handler(struct perf_event *bp,
                panic("Stack corruption detected");
 }
 
+static void ksw_watch_on_local_cpu(void *info)
+{
+       struct ksw_watchpoint *wp = info;
+       struct perf_event *bp;
+       ulong flags;
+       int cpu;
+       int ret;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       bp = per_cpu(*wp->event, cpu);
+       if (!bp) {
+               local_irq_restore(flags);
+               return;
+       }
+
+       ret = modify_wide_hw_breakpoint_local(bp, &wp->attr);
+       local_irq_restore(flags);
+       WARN(ret, "failed to reinstall HWBP on CPU%d, ret %d", cpu, ret);
+}
+
+static void ksw_watch_update(struct ksw_watchpoint *wp, ulong addr, u16 len)
+{
+       call_single_data_t *csd;
+       int cur_cpu;
+       int cpu;
+
+       wp->attr.bp_addr = addr;
+       wp->attr.bp_len = len;
+
+       cur_cpu = raw_smp_processor_id();
+       for_each_online_cpu(cpu) {
+               /* remote cpu first */
+               if (cpu == cur_cpu)
+                       continue;
+               csd = per_cpu_ptr(wp->csd, cpu);
+               smp_call_function_single_async(cpu, csd);
+       }
+       ksw_watch_on_local_cpu(wp);
+}
+
+int ksw_watch_get(struct ksw_watchpoint **out_wp)
+{
+       struct ksw_watchpoint *wp;
+       struct llist_node *node;
+
+       node = llist_del_first(&free_wp_list);
+       if (!node)
+               return -EBUSY;
+
+       wp = llist_entry(node, struct ksw_watchpoint, node);
+       WARN_ON_ONCE(wp->attr.bp_addr != (u64)&holder);
+
+       *out_wp = wp;
+       return 0;
+}
+int ksw_watch_on(struct ksw_watchpoint *wp, ulong watch_addr, u16 watch_len)
+{
+       ksw_watch_update(wp, watch_addr, watch_len);
+       return 0;
+}
+
+int ksw_watch_off(struct ksw_watchpoint *wp)
+{
+       WARN_ON_ONCE(wp->attr.bp_addr == (u64)&holder);
+       ksw_watch_update(wp, (ulong)&holder, sizeof(ulong));
+       llist_add(&wp->node, &free_wp_list);
+       return 0;
+}
+
 static int ksw_watch_alloc(void)
 {
        int max_watch = ksw_get_config()->max_watch;
        struct ksw_watchpoint *wp;
+       call_single_data_t *csd;
        int success = 0;
+       int cpu;
        int ret;
 
        init_llist_head(&free_wp_list);
@@ -41,6 +113,16 @@ static int ksw_watch_alloc(void)
                wp = kzalloc(sizeof(*wp), GFP_KERNEL);
                if (!wp)
                        return success > 0 ? success : -EINVAL;
+               wp->csd = alloc_percpu(call_single_data_t);
+               if (!wp->csd) {
+                       kfree(wp);
+                       return success > 0 ? success : -EINVAL;
+               }
+
+               for_each_possible_cpu(cpu) {
+                       csd = per_cpu_ptr(wp->csd, cpu);
+                       INIT_CSD(csd, ksw_watch_on_local_cpu, wp);
+               }
 
                hw_breakpoint_init(&wp->attr);
                wp->attr.bp_addr = (ulong)&holder;
@@ -50,6 +132,7 @@ static int ksw_watch_alloc(void)
                                                        ksw_watch_handler, wp);
                if (IS_ERR((void *)wp->event)) {
                        ret = PTR_ERR((void *)wp->event);
+                       free_percpu(wp->csd);
                        kfree(wp);
                        return success > 0 ? success : ret;
                }
@@ -71,6 +154,7 @@ static void ksw_watch_free(void)
        list_for_each_entry_safe(wp, tmp, &all_wp_list, list) {
                list_del(&wp->list);
                unregister_wide_hw_breakpoint(wp->event);
+               free_percpu(wp->csd);
                kfree(wp);
        }
        mutex_unlock(&all_wp_mutex);
@@ -84,7 +168,6 @@ int ksw_watch_init(void)
        if (ret <= 0)
                return -EBUSY;
 
-
        return 0;
 }
 
-- 
2.43.0

