On Mon, 27 Oct 2025 21:05:35 +0800
hongao <[email protected]> wrote:

> The freeing_list cleanup now retries optimizing any sibling probe that was
> deferred while this aggregator was being torn down.  Track the pending
> address in struct optimized_kprobe so __disarm_kprobe() can defer the
> retry until kprobe_optimizer() finishes disarming.

Thanks Hongao! I have some comments below.
Please make it simpler.

> 
> Signed-off-by: hongao <[email protected]>
> 
> diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
> index 8c4f3bb24..33d65b008 100644
> --- a/include/linux/kprobes.h
> +++ b/include/linux/kprobes.h
> @@ -338,6 +338,7 @@ DEFINE_INSN_CACHE_OPS(insn);
>  struct optimized_kprobe {
>       struct kprobe kp;
>       struct list_head list;  /* list for optimizing queue */
> +     kprobe_opcode_t *pending_reopt_addr;    /* addr that should trigger re-optimization */

You may only need a "bool reopt_unblocked_probes" flag here instead of storing the address.
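
Something like this (just a sketch on top of your patch; the field name is only a suggestion):

struct optimized_kprobe {
	struct kprobe kp;
	struct list_head list;	/* list for optimizing queue */
	bool reopt_unblocked_probes;	/* re-optimize the probes blocked by this aggrprobe */
	struct arch_optimized_insn optinsn;
};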

>       struct arch_optimized_insn optinsn;
>  };
>  
> diff --git a/kernel/kprobes.c b/kernel/kprobes.c
> index da59c68df..0976ab57d 100644
> --- a/kernel/kprobes.c
> +++ b/kernel/kprobes.c
> @@ -514,6 +514,7 @@ static LIST_HEAD(freeing_list);
>  
>  static void kprobe_optimizer(struct work_struct *work);
>  static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
> +static void optimize_kprobe(struct kprobe *p);
>  #define OPTIMIZE_DELAY 5
>  
>  /*
> @@ -591,6 +592,20 @@ static void do_free_cleaned_kprobes(void)
>                        */
>                       continue;
>               }
> +             if (op->pending_reopt_addr) {
> +                     struct kprobe *blocked;

        "unblocked"? (this is the probe being unblocked here)

> +
> +                     /*
> +                      * The aggregator was holding back another probe while it sat on the
> +                      * unoptimizing/freeing lists.  Now that the aggregator has been fully
> +                      * reverted we can safely retry the optimization of that sibling.
> +                      */
> +
> +                     blocked = get_optimized_kprobe(op->pending_reopt_addr);

You can use op->kp.addr here, so there is no need to record the address separately.
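
Then, with the flag above, this block could look something like this (untested, just to show the idea):

		if (op->reopt_unblocked_probes) {
			struct kprobe *unblocked;

			/*
			 * This aggrprobe was blocking another probe from being
			 * optimized while it sat on the unoptimizing/freeing
			 * lists.  Now that it has been fully reverted, retry
			 * the optimization of that probe.
			 */
			unblocked = get_optimized_kprobe(op->kp.addr);
			if (unblocked)
				optimize_kprobe(unblocked);
		}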

> +                     if (unlikely(blocked))
> +                             optimize_kprobe(blocked);
> +             }
> +
>               free_aggr_kprobe(&op->kp);
>       }
>  }
> @@ -1009,13 +1024,13 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
>               _p = get_optimized_kprobe(p->addr);
>               if (unlikely(_p) && reopt)
>                       optimize_kprobe(_p);
> +     } else if (reopt && kprobe_aggrprobe(p)) {

Here, @p is queued for unoptimization. This means @p is already an optprobe (an aggrprobe), so the kprobe_aggrprobe() check is redundant.

> +             struct optimized_kprobe *op =
> +                     container_of(p, struct optimized_kprobe, kp);
> +
> +             /* Defer the re-optimization until the worker finishes disarming. */
> +             op->pending_reopt_addr = p->addr;

You should save the @reopt flag instead of the address.
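
IOW, something like this (untested sketch):

	} else {
		struct optimized_kprobe *op;

		/* Since @p is queued here, it must be an aggrprobe. */
		op = container_of(p, struct optimized_kprobe, kp);

		/* Let the optimizer worker retry the re-optimization. */
		op->reopt_unblocked_probes = reopt;
	}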

Thank you,

>       }
> -     /*
> -      * TODO: Since unoptimization and real disarming will be done by
> -      * the worker thread, we can not check whether another probe are
> -      * unoptimized because of this probe here. It should be re-optimized
> -      * by the worker thread.
> -      */
>  }
>  
>  #else /* !CONFIG_OPTPROBES */
> -- 
> 2.47.2
> 


-- 
Masami Hiramatsu (Google) <[email protected]>
