Hello,

On Mon, 11 May 2026, Marco Crivellari wrote:

> This patch continues the effort to refactor workqueue APIs, which began
> with the changes introducing new workqueues and a new alloc_workqueue flag:
> 
>    commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq")
>    commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag")
> 
> The point of the refactoring is to eventually alter the default behavior of
> workqueues to become unbound by default so that their workload placement is
> optimized by the scheduler.
> 
> Before that can happen, workqueue users must be converted to the better-named
> new workqueues with no intended behaviour changes:
> 
>    system_wq -> system_percpu_wq
>    system_unbound_wq -> system_dfl_wq
> 
> This way the old obsolete workqueues (system_wq, system_unbound_wq) can be
> removed in the future.
> 
> Cc: Julian Anastasov <[email protected]>
> Cc: Pablo Neira Ayuso <[email protected]>
> Cc: Florian Westphal <[email protected]>
> Cc: Phil Sutter <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> Cc: [email protected]
> Link: https://lore.kernel.org/all/[email protected]/
> Suggested-by: Tejun Heo <[email protected]>
> Signed-off-by: Marco Crivellari <[email protected]>

	Sorry that this change was delayed, but there have been
many changes in IPVS over the last month. The last one that may
delay this patch is:

v3 of "ipvs: avoid possible loop in ip_vs_dst_event on resizing"
https://lore.kernel.org/lvs-devel/[email protected]/T/#u

	Maybe we have to wait for this change to reach net and
net-next. Also, we could reconsider which queue to use: these works
resize the hash tables and call synchronize_rcu(), so should we switch
to system_dfl_long_wq if such a job is considered "long"?
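
	Just to illustrate (a rough sketch only; system_dfl_long_wq is
not something this patch introduces, and I assume it would keep the
same delayed-work interface), the conn_tab call site would then become:

	if (atomic_read(&ipvs->conn_count) > t->u_thresh &&
	    !test_and_set_bit(IP_VS_WORK_CONN_RESIZE, &ipvs->work_flags))
		/* "long" queue: the handler resizes and calls synchronize_rcu() */
		mod_delayed_work(system_dfl_long_wq, &ipvs->conn_resize_work, 0);

The same one-word substitution would apply to the other
queue_delayed_work() / mod_delayed_work() call sites touched by this
patch.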

> ---
>  net/netfilter/ipvs/ip_vs_conn.c |  4 ++--
>  net/netfilter/ipvs/ip_vs_ctl.c  | 10 +++++-----
>  2 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
> index 9ea6b4fa78bf..2625c0379556 100644
> --- a/net/netfilter/ipvs/ip_vs_conn.c
> +++ b/net/netfilter/ipvs/ip_vs_conn.c
> @@ -285,7 +285,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
>       /* Schedule resizing if load increases */
>       if (atomic_read(&ipvs->conn_count) > t->u_thresh &&
>           !test_and_set_bit(IP_VS_WORK_CONN_RESIZE, &ipvs->work_flags))
> -             mod_delayed_work(system_unbound_wq, &ipvs->conn_resize_work, 0);
> +             mod_delayed_work(system_dfl_wq, &ipvs->conn_resize_work, 0);
>  
>       return ret;
>  }
> @@ -916,7 +916,7 @@ static void conn_resize_work_handler(struct work_struct *work)
>  
>  out:
>       /* Monitor if we need to shrink table */
> -     queue_delayed_work(system_unbound_wq, &ipvs->conn_resize_work,
> +     queue_delayed_work(system_dfl_wq, &ipvs->conn_resize_work,
>                          more_work ? 1 : 2 * HZ);
>  }
>  
> diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
> index c7c7f6a7a9f6..f8fe1c8981d8 100644
> --- a/net/netfilter/ipvs/ip_vs_ctl.c
> +++ b/net/netfilter/ipvs/ip_vs_ctl.c
> @@ -800,7 +800,7 @@ static void svc_resize_work_handler(struct work_struct *work)
>       if (!READ_ONCE(ipvs->enable) || !more_work ||
>           test_bit(IP_VS_WORK_SVC_NORESIZE, &ipvs->work_flags))
>               return;
> -     queue_delayed_work(system_unbound_wq, &ipvs->svc_resize_work, 1);
> +     queue_delayed_work(system_dfl_wq, &ipvs->svc_resize_work, 1);
>  }
>  
>  static inline void
> @@ -1833,7 +1833,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
>       /* Schedule resize work */
>       if (t && ip_vs_get_num_services(ipvs) > t->u_thresh &&
>           !test_and_set_bit(IP_VS_WORK_SVC_RESIZE, &ipvs->work_flags))
> -             queue_delayed_work(system_unbound_wq, &ipvs->svc_resize_work,
> +             queue_delayed_work(system_dfl_wq, &ipvs->svc_resize_work,
>                                  1);
>  
>       *svc_p = svc;
> @@ -2078,7 +2078,7 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
>       } else if (ns <= t->l_thresh &&
>                  !test_and_set_bit(IP_VS_WORK_SVC_RESIZE,
>                                    &ipvs->work_flags)) {
> -             queue_delayed_work(system_unbound_wq, &ipvs->svc_resize_work,
> +             queue_delayed_work(system_dfl_wq, &ipvs->svc_resize_work,
>                                  1);
>       }
>       return 0;
> @@ -2511,7 +2511,7 @@ static int ipvs_proc_conn_lfactor(const struct ctl_table *table, int write,
>               } else {
>                       WRITE_ONCE(*valp, val);
>                       if (rcu_access_pointer(ipvs->conn_tab))
> -                             mod_delayed_work(system_unbound_wq,
> +                             mod_delayed_work(system_dfl_wq,
>                                                &ipvs->conn_resize_work, 0);
>               }
>       }
> @@ -2543,7 +2543,7 @@ static int ipvs_proc_svc_lfactor(const struct ctl_table *table, int write,
>                           READ_ONCE(ipvs->enable) &&
>                           !test_bit(IP_VS_WORK_SVC_NORESIZE,
>                                     &ipvs->work_flags))
> -                             mod_delayed_work(system_unbound_wq,
> +                             mod_delayed_work(system_dfl_wq,
>                                                &ipvs->svc_resize_work, 0);
>                       mutex_unlock(&ipvs->service_mutex);
>               }
> -- 
> 2.54.0

Regards

--
Julian Anastasov <[email protected]>

