On Fri, 2025-04-11 at 09:37 +0200, Nam Cao wrote:
> Now that there are 2 monitors for real-time applications, users may
> want to enable both of them simultaneously. Make the number of
> per-task monitors configurable. Default it to 2 for now.
> 
> Signed-off-by: Nam Cao <[email protected]>
> ---
>  include/linux/rv.h      | 2 +-
>  include/linux/sched.h   | 8 +++-----
>  kernel/trace/rv/Kconfig | 9 +++++++++
>  kernel/trace/rv/rv.c    | 8 ++++----
>  4 files changed, 17 insertions(+), 10 deletions(-)
> 
> diff --git a/include/linux/rv.h b/include/linux/rv.h
> index c8320fa3a94b..204436a73bee 100644
> --- a/include/linux/rv.h
> +++ b/include/linux/rv.h
> @@ -75,7 +75,7 @@ static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon)
>   * these are justified.
>   */
>  #define RV_PER_TASK_MONITORS         1

We could get rid of RV_PER_TASK_MONITORS too I guess.

Rest looks good, thanks.

Reviewed-by: Gabriele Monaco <[email protected]>

> -#define RV_PER_TASK_MONITOR_INIT     (RV_PER_TASK_MONITORS)
> +#define RV_PER_TASK_MONITOR_INIT     (CONFIG_RV_PER_TASK_MONITORS)
>  
>  union rv_task_monitor {
>       struct da_monitor       da_mon;
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 45be0fa7a5cc..560782493292 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1623,12 +1623,10 @@ struct task_struct {
>  
>  #ifdef CONFIG_RV
>       /*
> -      * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
> -      * If we find justification for more monitors, we can think
> -      * about adding more or developing a dynamic method. So far,
> -      * none of these are justified.
> +      * Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS.
> +      * If memory becomes a concern, we can think about a dynamic method.
>        */
> -     union rv_task_monitor           rv[RV_PER_TASK_MONITORS];
> +     union rv_task_monitor           rv[CONFIG_RV_PER_TASK_MONITORS];
>  #endif
>  
>  #ifdef CONFIG_USER_EVENTS
> diff --git a/kernel/trace/rv/Kconfig b/kernel/trace/rv/Kconfig
> index 942d57575e67..c11bf7e61ebf 100644
> --- a/kernel/trace/rv/Kconfig
> +++ b/kernel/trace/rv/Kconfig
> @@ -32,6 +32,15 @@ menuconfig RV
>         For further information, see:
>           Documentation/trace/rv/runtime-verification.rst
>  
> +config RV_PER_TASK_MONITORS
> +     int "Maximum number of per-task monitors"
> +     depends on RV
> +     range 1 8
> +     default 2
> +     help
> +       This option configures the maximum number of per-task RV
> +       monitors that can run simultaneously.
> +
>  source "kernel/trace/rv/monitors/wip/Kconfig"
>  source "kernel/trace/rv/monitors/wwnr/Kconfig"
>  source "kernel/trace/rv/monitors/sched/Kconfig"
> diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
> index d493fddf411f..ebd4b4b228bf 100644
> --- a/kernel/trace/rv/rv.c
> +++ b/kernel/trace/rv/rv.c
> @@ -165,7 +165,7 @@ struct dentry *get_monitors_root(void)
>  LIST_HEAD(rv_monitors_list);
>  
>  static int task_monitor_count;
> -static bool task_monitor_slots[RV_PER_TASK_MONITORS];
> +static bool task_monitor_slots[CONFIG_RV_PER_TASK_MONITORS];
>  
>  int rv_get_task_monitor_slot(void)
>  {
> @@ -173,12 +173,12 @@ int rv_get_task_monitor_slot(void)
>  
>       lockdep_assert_held(&rv_interface_lock);
>  
> -     if (task_monitor_count == RV_PER_TASK_MONITORS)
> +     if (task_monitor_count == CONFIG_RV_PER_TASK_MONITORS)
>               return -EBUSY;
>  
>       task_monitor_count++;
>  
> -     for (i = 0; i < RV_PER_TASK_MONITORS; i++) {
> +     for (i = 0; i < CONFIG_RV_PER_TASK_MONITORS; i++) {
>               if (task_monitor_slots[i] == false) {
>                       task_monitor_slots[i] = true;
>                       return i;
> @@ -194,7 +194,7 @@ void rv_put_task_monitor_slot(int slot)
>  {
>       lockdep_assert_held(&rv_interface_lock);
>  
> -     if (slot < 0 || slot >= RV_PER_TASK_MONITORS) {
> +     if (slot < 0 || slot >= CONFIG_RV_PER_TASK_MONITORS) {
>               WARN_ONCE(1, "RV releasing an invalid slot!: %d\n", slot);
>               return;
>       }


Reply via email to