Re: [Cluster-devel] [PATCH v4 46/48] mm: shrinker: make memcg slab shrink lockless

2023-08-07 Thread Dave Chinner
On Mon, Aug 07, 2023 at 07:09:34PM +0800, Qi Zheng wrote:
> Like global slab shrink, this commit also uses refcount+RCU method to make
> memcg slab shrink lockless.

This patch does random code cleanups amongst the actual RCU changes.
Can you please move the cleanups to a separate patch to reduce the
noise in this one?

> diff --git a/mm/shrinker.c b/mm/shrinker.c
> index d318f5621862..fee6f62904fb 100644
> --- a/mm/shrinker.c
> +++ b/mm/shrinker.c
> @@ -107,6 +107,12 @@ static struct shrinker_info 
> *shrinker_info_protected(struct mem_cgroup *memcg,
>  lockdep_is_held(&shrinker_rwsem));
>  }
>  
> +static struct shrinker_info *shrinker_info_rcu(struct mem_cgroup *memcg,
> +int nid)
> +{
> + return rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
> +}

This helper doesn't add value. It doesn't tell me that
rcu_read_lock() needs to be held when it is called, for one thing.
>  static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
>   int old_size, int new_nr_max)
>  {
> @@ -198,7 +204,7 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, 
> int shrinker_id)
>   struct shrinker_info_unit *unit;
>  
>   rcu_read_lock();
> - info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
> + info = shrinker_info_rcu(memcg, nid);

... whilst the original code here was obviously correct.

>   unit = info->unit[shriner_id_to_index(shrinker_id)];
>   if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
>   /* Pairs with smp mb in shrink_slab() */
> @@ -211,7 +217,7 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, 
> int shrinker_id)
>  
>  static DEFINE_IDR(shrinker_idr);
>  
> -static int prealloc_memcg_shrinker(struct shrinker *shrinker)
> +static int shrinker_memcg_alloc(struct shrinker *shrinker)

Cleanups in a separate patch.

> @@ -253,10 +258,15 @@ static long xchg_nr_deferred_memcg(int nid, struct 
> shrinker *shrinker,
>  {
>   struct shrinker_info *info;
>   struct shrinker_info_unit *unit;
> + long nr_deferred;
>  
> - info = shrinker_info_protected(memcg, nid);
> + rcu_read_lock();
> + info = shrinker_info_rcu(memcg, nid);
>   unit = info->unit[shriner_id_to_index(shrinker->id)];
> - return atomic_long_xchg(&unit->nr_deferred[shriner_id_to_offset(shrinker->id)], 0);
> + nr_deferred = atomic_long_xchg(&unit->nr_deferred[shriner_id_to_offset(shrinker->id)], 0);
> + rcu_read_unlock();
> +
> + return nr_deferred;
>  }

This adds two rcu_read_lock() sections to every call to
do_shrink_slab(). It's not at all clear from any of the other code
that do_shrink_slab() now has internal rcu_read_lock() sections.

> @@ -464,18 +480,23 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, 
> int nid,
>   if (!mem_cgroup_online(memcg))
>   return 0;
>  
> - if (!down_read_trylock(&shrinker_rwsem))
> - return 0;
> -
> - info = shrinker_info_protected(memcg, nid);
> +again:
> + rcu_read_lock();
> + info = shrinker_info_rcu(memcg, nid);
>   if (unlikely(!info))
>   goto unlock;
>  
> - for (; index < shriner_id_to_index(info->map_nr_max); index++) {
> + if (index < shriner_id_to_index(info->map_nr_max)) {
>   struct shrinker_info_unit *unit;
>  
>   unit = info->unit[index];
>  
> + /*
> +  * The shrinker_info_unit will not be freed, so we can
> +  * safely release the RCU lock here.
> +  */
> + rcu_read_unlock();

Why - what guarantees that the shrinker_info_unit exists at this
point? We hold no reference to it, we hold no reference to any
shrinker, etc. What provides this existence guarantee?

> +
>   for_each_set_bit(offset, unit->map, SHRINKER_UNIT_BITS) {
>   struct shrink_control sc = {
>   .gfp_mask = gfp_mask,
> @@ -485,12 +506,14 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, 
> int nid,
>   struct shrinker *shrinker;
>   int shrinker_id = calc_shrinker_id(index, offset);
>  
> + rcu_read_lock();
>   shrinker = idr_find(&shrinker_idr, shrinker_id);
> - if (unlikely(!shrinker || !(shrinker->flags & SHRINKER_REGISTERED))) {
> - if (!shrinker)
> - clear_bit(offset, unit->map);
> + if (unlikely(!shrinker || !shrinker_try_get(shrinker))) {
> + clear_bit(offset, unit->map);
> + rcu_read_unlock();
>   continue;
>   }
> + rcu_read_unlock();
>  
>   /* Call non-slab shrinkers even though kmem is disabled 
> 

Re: [Cluster-devel] [PATCH v4 06/48] binder: dynamically allocate the android-binder shrinker

2023-08-07 Thread Muchun Song



> On Aug 7, 2023, at 19:08, Qi Zheng  wrote:
> 
> Use new APIs to dynamically allocate the android-binder shrinker.
> 
> Signed-off-by: Qi Zheng 

Reviewed-by: Muchun Song 




Re: [Cluster-devel] [PATCH v4 19/48] rcu: dynamically allocate the rcu-kfree shrinker

2023-08-07 Thread Muchun Song



> On Aug 7, 2023, at 19:09, Qi Zheng  wrote:
> 
> Use new APIs to dynamically allocate the rcu-kfree shrinker.
> 
> Signed-off-by: Qi Zheng 

Reviewed-by: Muchun Song 




Re: [Cluster-devel] [PATCH v4 45/48] mm: shrinker: make global slab shrink lockless

2023-08-07 Thread Dave Chinner
On Mon, Aug 07, 2023 at 07:09:33PM +0800, Qi Zheng wrote:
> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
> index eb342994675a..f06225f18531 100644
> --- a/include/linux/shrinker.h
> +++ b/include/linux/shrinker.h
> @@ -4,6 +4,8 @@
>  
>  #include <linux/atomic.h>
>  #include <linux/types.h>
> +#include <linux/refcount.h>
> +#include <linux/completion.h>
>  
>  #define SHRINKER_UNIT_BITS   BITS_PER_LONG
>  
> @@ -87,6 +89,10 @@ struct shrinker {
>   int seeks;  /* seeks to recreate an obj */
>   unsigned flags;
>  
> + refcount_t refcount;
> + struct completion done;
> + struct rcu_head rcu;

Documentation, please. What does the refcount protect, what does the
completion provide, etc.
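
For illustration, the sort of documentation being asked for, inferred
purely from how these fields are used later in the patch (a sketch, not
text from the series):

	refcount_t refcount;	/* pins the shrinker while do_shrink_slab()
				 * runs outside the RCU read-side section */
	struct completion done;	/* signalled on the final shrinker_put(), so
				 * shrinker_free() can wait for in-flight work */
	struct rcu_head rcu;	/* defers the final free until concurrent
				 * lockless shrinker_list walkers have moved on */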

> +
>   void *private_data;
>  
>   /* These are for internal use */
> @@ -120,6 +126,17 @@ struct shrinker *shrinker_alloc(unsigned int flags, 
> const char *fmt, ...);
>  void shrinker_register(struct shrinker *shrinker);
>  void shrinker_free(struct shrinker *shrinker);
>  
> +static inline bool shrinker_try_get(struct shrinker *shrinker)
> +{
> + return refcount_inc_not_zero(&shrinker->refcount);
> +}
> +
> +static inline void shrinker_put(struct shrinker *shrinker)
> +{
> + if (refcount_dec_and_test(&shrinker->refcount))
> + complete(&shrinker->done);
> +}
> +
>  #ifdef CONFIG_SHRINKER_DEBUG
>  extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
> const char *fmt, ...);
> diff --git a/mm/shrinker.c b/mm/shrinker.c
> index 1911c06b8af5..d318f5621862 100644
> --- a/mm/shrinker.c
> +++ b/mm/shrinker.c
> @@ -2,6 +2,7 @@
>  #include <linux/memcontrol.h>
>  #include <linux/rwsem.h>
>  #include <linux/shrinker.h>
> +#include <linux/rculist.h>
>  #include <linux/trace_events.h>
>  
>  #include "internal.h"
> @@ -577,33 +578,42 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, 
> struct mem_cgroup *memcg,
>   if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
>   return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
>  
> - if (!down_read_trylock(&shrinker_rwsem))
> - goto out;
> -
> - list_for_each_entry(shrinker, &shrinker_list, list) {
> + rcu_read_lock();
> + list_for_each_entry_rcu(shrinker, &shrinker_list, list) {
>   struct shrink_control sc = {
>   .gfp_mask = gfp_mask,
>   .nid = nid,
>   .memcg = memcg,
>   };
>  
> + if (!shrinker_try_get(shrinker))
> + continue;
> +
> + /*
> +  * We can safely unlock the RCU lock here since we already
> +  * hold the refcount of the shrinker.
> +  */
> + rcu_read_unlock();
> +
>   ret = do_shrink_slab(&sc, shrinker, priority);
>   if (ret == SHRINK_EMPTY)
>   ret = 0;
>   freed += ret;
> +
>   /*
> -  * Bail out if someone want to register a new shrinker to
> -  * prevent the registration from being stalled for long periods
> -  * by parallel ongoing shrinking.
> +  * This shrinker may be deleted from shrinker_list and freed
> +  * after the shrinker_put() below, but this shrinker is still
> +  * used for the next traversal. So it is necessary to hold the
> +  * RCU lock first to prevent this shrinker from being freed,
> +  * which also ensures that the next shrinker that is traversed
> +  * will not be freed (even if it is deleted from shrinker_list
> +  * at the same time).
>*/

This needs to be moved to the head of the function, and document
the whole list walk, get, put and completion parts of the algorithm
that make it safe. There's more to this than "we hold a reference
count", especially the tricky "we might see the shrinker before it
is fully initialised" case.
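
For illustration, a function-head comment along the requested lines (a
sketch built only from what this patch itself does; the initialisation
case would still need the author's answer):

	/*
	 * shrink_slab() walks shrinker_list under rcu_read_lock() and takes
	 * a temporary reference via shrinker_try_get() before dropping out
	 * of the RCU section to run do_shrink_slab(). shrinker_try_get()
	 * fails once the refcount has hit zero (teardown in progress), so a
	 * dying shrinker is skipped rather than used. Re-entering the RCU
	 * section before shrinker_put() keeps the current list entry valid
	 * long enough to step to the next one, even if it is unlinked
	 * concurrently.
	 */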


.
>  void shrinker_free(struct shrinker *shrinker)
>  {
>   struct dentry *debugfs_entry = NULL;
> @@ -686,9 +712,18 @@ void shrinker_free(struct shrinker *shrinker)
>   if (!shrinker)
>   return;
>  
> + if (shrinker->flags & SHRINKER_REGISTERED) {
> + shrinker_put(shrinker);
> + wait_for_completion(&shrinker->done);
> + }

Needs a comment explaining why we need to wait here...
> +
>   down_write(_rwsem);
>   if (shrinker->flags & SHRINKER_REGISTERED) {
> - list_del(&shrinker->list);
> + /*
> +  * Lookups on the shrinker are over and will fail in the future,
> +  * so we can now remove it from the lists and free it.
> +  */

 rather than here after the wait has been done and provided the
guarantee that no shrinker is running or will run again...
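
Something like the following would capture it (illustrative only):

	if (shrinker->flags & SHRINKER_REGISTERED) {
		/*
		 * Drop the initial reference and wait until every concurrent
		 * shrink_slab() holding a temporary reference has finished.
		 * After this no lookup can succeed and none is in flight, so
		 * unlinking and freeing the shrinker below is safe.
		 */
		shrinker_put(shrinker);
		wait_for_completion(&shrinker->done);
	}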

-Dave.
-- 
Dave Chinner
da...@fromorbit.com



Re: [Cluster-devel] [PATCH v4 44/48] mm: shrinker: add a secondary array for shrinker_info::{map, nr_deferred}

2023-08-07 Thread Dave Chinner
On Mon, Aug 07, 2023 at 07:09:32PM +0800, Qi Zheng wrote:
> Currently, we maintain two linear arrays per node per memcg, which are
> shrinker_info::map and shrinker_info::nr_deferred. And we need to resize
> them when the shrinker_nr_max is exceeded, that is, allocate a new array,
> and then copy the old array to the new array, and finally free the old
> array by RCU.
> 
> For shrinker_info::map, we do set_bit() under the RCU lock, so we may set
> the value into the old map which is about to be freed. This may cause the
> value set to be lost. The current solution is not to copy the old map when
> resizing, but to set all the corresponding bits in the new map to 1. This
> solves the data loss problem, but brings the overhead of more pointless
> loops while doing memcg slab shrink.
> 
> For shrinker_info::nr_deferred, we will only modify it under the read lock
> of shrinker_rwsem, so it will not run concurrently with the resizing. But
> after we make memcg slab shrink lockless, there will be the same data loss
> problem as shrinker_info::map, and we can't work around it like the map.
> 
> For such resizable arrays, the most straightforward idea is to change it
> to xarray, like we did for list_lru [1]. We need to do xa_store() in the
> list_lru_add()-->set_shrinker_bit(), but this will cause memory
> allocation, and the list_lru_add() doesn't accept failure. A possible
> solution is to pre-allocate, but the location of pre-allocation is not
> well determined.

So you implemented a two-level array that preallocates leaf
nodes to work around it? It's remarkably complex for what it does;
I can't help but think a radix tree using a special holder for
nr_deferred values of zero would end up being simpler...
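
Roughly (a sketch of that suggestion only, with hypothetical names, not
working code from the series): keep the per-memcg, per-node nr_deferred
counters in an xarray indexed by shrinker id, where an absent entry
simply means "deferred count is zero":

	/* Absent entry == zero; no resize-and-copy, no bulk preallocation. */
	atomic_long_t *nr = xa_load(&memcg_nid_deferred_xa, shrinker->id);
	long nr_deferred = nr ? atomic_long_xchg(nr, 0) : 0;

Only shrinkers that actually defer work would ever need a slot
allocated.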

> Therefore, this commit chooses to introduce a secondary array for
> shrinker_info::{map, nr_deferred}, so that we only need to copy this
> secondary array every time the size is resized. Then even if we get the
> old secondary array under the RCU lock, the found map and nr_deferred are
> also true, so no data is lost.

I don't understand what you are trying to describe here. If we get
the old array, then don't we get either a stale nr_deferred value,
or the update we do gets lost because the next shrinker lookup will
find the new array, and so the deferred value stored to the old one
is never seen again?

> 
> [1]. 
> https://lore.kernel.org/all/20220228122126.37293-13-songmuc...@bytedance.com/
> 
> Signed-off-by: Qi Zheng 
> Reviewed-by: Muchun Song 
> ---
.
> diff --git a/mm/shrinker.c b/mm/shrinker.c
> index a27779ed3798..1911c06b8af5 100644
> --- a/mm/shrinker.c
> +++ b/mm/shrinker.c
> @@ -12,15 +12,50 @@ DECLARE_RWSEM(shrinker_rwsem);
>  #ifdef CONFIG_MEMCG
>  static int shrinker_nr_max;
>  
> -/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
> -static inline int shrinker_map_size(int nr_items)
> +static inline int shrinker_unit_size(int nr_items)
>  {
> - return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
> + return (DIV_ROUND_UP(nr_items, SHRINKER_UNIT_BITS) * sizeof(struct 
> shrinker_info_unit *));
>  }
>  
> -static inline int shrinker_defer_size(int nr_items)
> +static inline void shrinker_unit_free(struct shrinker_info *info, int start)
>  {
> - return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
> + struct shrinker_info_unit **unit;
> + int nr, i;
> +
> + if (!info)
> + return;
> +
> + unit = info->unit;
> + nr = DIV_ROUND_UP(info->map_nr_max, SHRINKER_UNIT_BITS);
> +
> + for (i = start; i < nr; i++) {
> + if (!unit[i])
> + break;
> +
> + kvfree(unit[i]);
> + unit[i] = NULL;
> + }
> +}
> +
> +static inline int shrinker_unit_alloc(struct shrinker_info *new,
> +struct shrinker_info *old, int nid)
> +{
> + struct shrinker_info_unit *unit;
> + int nr = DIV_ROUND_UP(new->map_nr_max, SHRINKER_UNIT_BITS);
> + int start = old ? DIV_ROUND_UP(old->map_nr_max, SHRINKER_UNIT_BITS) : 0;
> + int i;
> +
> + for (i = start; i < nr; i++) {
> + unit = kvzalloc_node(sizeof(*unit), GFP_KERNEL, nid);

A unit is 576 bytes. Why is this using kvzalloc_node()?

> + if (!unit) {
> + shrinker_unit_free(new, start);
> + return -ENOMEM;
> + }
> +
> + new->unit[i] = unit;
> + }
> +
> + return 0;
>  }
>  
>  void free_shrinker_info(struct mem_cgroup *memcg)
> @@ -32,6 +67,7 @@ void free_shrinker_info(struct mem_cgroup *memcg)
>   for_each_node(nid) {
>   pn = memcg->nodeinfo[nid];
>   info = rcu_dereference_protected(pn->shrinker_info, true);
> + shrinker_unit_free(info, 0);
>   kvfree(info);
>   rcu_assign_pointer(pn->shrinker_info, NULL);
>   }

Why is this safe? The info and maps are looked up by RCU, so why is
freeing them without an RCU grace period safe?

Re: [Cluster-devel] [PATCH v4 45/48] mm: shrinker: make global slab shrink lockless

2023-08-07 Thread Dave Chinner
On Mon, Aug 07, 2023 at 07:09:33PM +0800, Qi Zheng wrote:
> The shrinker_rwsem is a global read-write lock in shrinkers subsystem,
> which protects most operations such as slab shrink, registration and
> unregistration of shrinkers, etc. This can easily cause problems in the
> following cases.

> This commit uses the refcount+RCU method [5] proposed by Dave Chinner
> to re-implement the lockless global slab shrink. The memcg slab shrink is
> handled in the subsequent patch.

> ---
>  include/linux/shrinker.h | 17 ++
>  mm/shrinker.c| 70 +---
>  2 files changed, 68 insertions(+), 19 deletions(-)

There's no documentation in the code explaining how the lockless
shrinker algorithm works. It's left to the reader to work out how
this all goes together.

> diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
> index eb342994675a..f06225f18531 100644
> --- a/include/linux/shrinker.h
> +++ b/include/linux/shrinker.h
> @@ -4,6 +4,8 @@
>  
>  #include <linux/atomic.h>
>  #include <linux/types.h>
> +#include <linux/refcount.h>
> +#include <linux/completion.h>
>  
>  #define SHRINKER_UNIT_BITS   BITS_PER_LONG
>  
> @@ -87,6 +89,10 @@ struct shrinker {
>   int seeks;  /* seeks to recreate an obj */
>   unsigned flags;
>  
> + refcount_t refcount;
> + struct completion done;
> + struct rcu_head rcu;

What does the refcount protect, why do we need the completion, etc?

> +
>   void *private_data;
>  
>   /* These are for internal use */
> @@ -120,6 +126,17 @@ struct shrinker *shrinker_alloc(unsigned int flags, 
> const char *fmt, ...);
>  void shrinker_register(struct shrinker *shrinker);
>  void shrinker_free(struct shrinker *shrinker);
>  
> +static inline bool shrinker_try_get(struct shrinker *shrinker)
> +{
> + return refcount_inc_not_zero(&shrinker->refcount);
> +}
> +
> +static inline void shrinker_put(struct shrinker *shrinker)
> +{
> + if (refcount_dec_and_test(&shrinker->refcount))
> + complete(&shrinker->done);
> +}
> +
>  #ifdef CONFIG_SHRINKER_DEBUG
>  extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
> const char *fmt, ...);
> diff --git a/mm/shrinker.c b/mm/shrinker.c
> index 1911c06b8af5..d318f5621862 100644
> --- a/mm/shrinker.c
> +++ b/mm/shrinker.c
> @@ -2,6 +2,7 @@
>  #include <linux/memcontrol.h>
>  #include <linux/rwsem.h>
>  #include <linux/shrinker.h>
> +#include <linux/rculist.h>
>  #include <linux/trace_events.h>
>  
>  #include "internal.h"
> @@ -577,33 +578,42 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, 
> struct mem_cgroup *memcg,
>   if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
>   return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
>  
> - if (!down_read_trylock(&shrinker_rwsem))
> - goto out;
> -
> - list_for_each_entry(shrinker, &shrinker_list, list) {
> + rcu_read_lock();
> + list_for_each_entry_rcu(shrinker, &shrinker_list, list) {
>   struct shrink_control sc = {
>   .gfp_mask = gfp_mask,
>   .nid = nid,
>   .memcg = memcg,
>   };
>  
> + if (!shrinker_try_get(shrinker))
> + continue;
> +
> + /*
> +  * We can safely unlock the RCU lock here since we already
> +  * hold the refcount of the shrinker.
> +  */
> + rcu_read_unlock();
> +
>   ret = do_shrink_slab(&sc, shrinker, priority);
>   if (ret == SHRINK_EMPTY)
>   ret = 0;
>   freed += ret;
> +
>   /*
> -  * Bail out if someone want to register a new shrinker to
> -  * prevent the registration from being stalled for long periods
> -  * by parallel ongoing shrinking.
> +  * This shrinker may be deleted from shrinker_list and freed
> +  * after the shrinker_put() below, but this shrinker is still
> +  * used for the next traversal. So it is necessary to hold the
> +  * RCU lock first to prevent this shrinker from being freed,
> +  * which also ensures that the next shrinker that is traversed
> +  * will not be freed (even if it is deleted from shrinker_list
> +  * at the same time).
>*/

This comment really should be at the head of the function,
describing the algorithm used within the function itself, i.e. how
reference counts are used w.r.t. the rcu_read_lock() usage to
guarantee existence of the shrinker and the validity of the list
walk.

I'm not going to remember all these little details when I look at
this code in another 6 months time, and having to work it out from
first principles every time I look at the code will waste a lot of
time...

-Dave.
-- 
Dave Chinner
da...@fromorbit.com



[Cluster-devel] [PATCH v7 09/13] fs: add infrastructure for multigrain timestamps

2023-08-07 Thread Jeff Layton
The VFS always uses coarse-grained timestamps when updating the ctime
and mtime after a change. This has the benefit of allowing filesystems
to optimize away a lot of metadata updates, down to around 1 per jiffy,
even when a file is under heavy writes.

Unfortunately, this has always been an issue when we're exporting via
NFSv3, which relies on timestamps to validate caches. A lot of changes
can happen in a jiffy, so timestamps aren't sufficient to help the
client decide to invalidate the cache. Even with NFSv4, a lot of
exported filesystems don't properly support a change attribute and are
subject to the same problems with timestamp granularity. Other
applications have similar issues with timestamps (e.g. backup
applications).

If we were to always use fine-grained timestamps, that would improve the
situation, but that becomes rather expensive, as the underlying
filesystem would have to log a lot more metadata updates.

What we need is a way to only use fine-grained timestamps when they are
being actively queried.

POSIX generally mandates that when the mtime changes, the ctime must
also change. The kernel always stores normalized ctime values, so only
the first 30 bits of the tv_nsec field are ever used.

Use the 31st bit of the ctime tv_nsec field to indicate that something
has queried the inode for the mtime or ctime. When this flag is set,
on the next mtime or ctime update, the kernel will fetch a fine-grained
timestamp instead of the usual coarse-grained one.
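
(A normalized tv_nsec is always below NSEC_PER_SEC, and 10^9 < 2^30, so
bit 30 is guaranteed clear in a stored ctime. The flag definition this
implies, presumably in an fs.h hunk not shown in this excerpt, would be
along the lines of:

	#define I_CTIME_QUERIED		(1L << 30)

and that is the constant the code below tests and sets.)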

Filesystems can opt into this behavior by setting the FS_MGTIME flag in
the fstype. Filesystems that don't set this flag will continue to use
coarse-grained timestamps.

Later patches will convert individual filesystems to use the new
infrastructure.

Signed-off-by: Jeff Layton 
---
 fs/inode.c | 82 --
 fs/stat.c  | 41 +--
 include/linux/fs.h | 46 --
 3 files changed, 162 insertions(+), 7 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index e50d94a136fe..f55957ac80e6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2118,10 +2118,52 @@ int file_remove_privs(struct file *file)
 }
 EXPORT_SYMBOL(file_remove_privs);
 
+/**
+ * current_mgtime - Return FS time (possibly fine-grained)
+ * @inode: inode.
+ *
+ * Return the current time truncated to the time granularity supported by
+ * the fs, as suitable for a ctime/mtime change. If the ctime is flagged
+ * as having been QUERIED, get a fine-grained timestamp.
+ */
+struct timespec64 current_mgtime(struct inode *inode)
+{
+   struct timespec64 now, ctime;
+   atomic_long_t *pnsec = (atomic_long_t *)&inode->__i_ctime.tv_nsec;
+   long nsec = atomic_long_read(pnsec);
+
+   if (nsec & I_CTIME_QUERIED) {
+   ktime_get_real_ts64(&now);
+   return timestamp_truncate(now, inode);
+   }
+
+   ktime_get_coarse_real_ts64(&now);
+   now = timestamp_truncate(now, inode);
+
+   /*
+* If we've recently fetched a fine-grained timestamp
+* then the coarse-grained one may still be earlier than the
+* existing ctime. Just keep the existing value if so.
+*/
+   ctime = inode_get_ctime(inode);
+   if (timespec64_compare(&ctime, &now) > 0)
+   now = ctime;
+
+   return now;
+}
+EXPORT_SYMBOL(current_mgtime);
+
+static struct timespec64 current_ctime(struct inode *inode)
+{
+   if (is_mgtime(inode))
+   return current_mgtime(inode);
+   return current_time(inode);
+}
+
 static int inode_needs_update_time(struct inode *inode)
 {
int sync_it = 0;
-   struct timespec64 now = current_time(inode);
+   struct timespec64 now = current_ctime(inode);
struct timespec64 ctime;
 
/* First try to exhaust all avenues to not sync */
@@ -2552,9 +2594,43 @@ EXPORT_SYMBOL(current_time);
  */
 struct timespec64 inode_set_ctime_current(struct inode *inode)
 {
-   struct timespec64 now = current_time(inode);
+   struct timespec64 now;
+   struct timespec64 ctime;
+
+   ctime.tv_nsec = READ_ONCE(inode->__i_ctime.tv_nsec);
+   if (!(ctime.tv_nsec & I_CTIME_QUERIED)) {
+   now = current_time(inode);
 
-   inode_set_ctime(inode, now.tv_sec, now.tv_nsec);
+   /* Just copy it into place if it's not multigrain */
+   if (!is_mgtime(inode)) {
+   inode_set_ctime_to_ts(inode, now);
+   return now;
+   }
+
+   /*
+* If we've recently updated with a fine-grained timestamp,
+* then the coarse-grained one may still be earlier than the
+* existing ctime. Just keep the existing value if so.
+*/
+   ctime.tv_sec = inode->__i_ctime.tv_sec;
+   if (timespec64_compare(&ctime, &now) > 0)
+   return ctime;
+
+   /*
+* Ctime updates are usually protected by the 

[Cluster-devel] [PATCH v7 12/13] ext4: switch to multigrain timestamps

2023-08-07 Thread Jeff Layton
Enable multigrain timestamps, which should ensure that there is an
apparent change to the timestamp whenever it has been written after
being actively observed via getattr.

For ext4, we only need to enable the FS_MGTIME flag.

Acked-by: Theodore Ts'o 
Reviewed-by: Jan Kara 
Signed-off-by: Jeff Layton 
---
 fs/ext4/super.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b54c70e1a74e..cb1ff47af156 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -7279,7 +7279,7 @@ static struct file_system_type ext4_fs_type = {
.init_fs_context= ext4_init_fs_context,
.parameters = ext4_param_specs,
.kill_sb= kill_block_super,
-   .fs_flags   = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+   .fs_flags   = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
 };
 MODULE_ALIAS_FS("ext4");
 

-- 
2.41.0



[Cluster-devel] [PATCH v7 13/13] btrfs: convert to multigrain timestamps

2023-08-07 Thread Jeff Layton
Enable multigrain timestamps, which should ensure that there is an
apparent change to the timestamp whenever it has been written after
being actively observed via getattr.

Beyond enabling the FS_MGTIME flag, this patch eliminates
update_time_for_write, which goes to great pains to avoid in-memory
stores. Just have it overwrite the timestamps unconditionally.

Signed-off-by: Jeff Layton 
Acked-by: David Sterba 
---
 fs/btrfs/file.c  | 24 
 fs/btrfs/super.c |  5 +++--
 2 files changed, 7 insertions(+), 22 deletions(-)

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index d7a9ece7a40b..b9e75c9f95ac 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1106,25 +1106,6 @@ void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
btrfs_drew_write_unlock(&inode->root->snapshot_lock);
 }
 
-static void update_time_for_write(struct inode *inode)
-{
-   struct timespec64 now, ctime;
-
-   if (IS_NOCMTIME(inode))
-   return;
-
-   now = current_time(inode);
-   if (!timespec64_equal(&inode->i_mtime, &now))
-   inode->i_mtime = now;
-
-   ctime = inode_get_ctime(inode);
-   if (!timespec64_equal(&ctime, &now))
-   inode_set_ctime_to_ts(inode, now);
-
-   if (IS_I_VERSION(inode))
-   inode_inc_iversion(inode);
-}
-
 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
 size_t count)
 {
@@ -1156,7 +1137,10 @@ static int btrfs_write_check(struct kiocb *iocb, struct 
iov_iter *from,
 * need to start yet another transaction to update the inode as we will
 * update the inode when we finish writing whatever data we write.
 */
-   update_time_for_write(inode);
+   if (!IS_NOCMTIME(inode)) {
+   inode->i_mtime = inode_set_ctime_current(inode);
+   inode_inc_iversion(inode);
+   }
 
start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index f1dd172d8d5b..8eda51b095c9 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2144,7 +2144,7 @@ static struct file_system_type btrfs_fs_type = {
.name   = "btrfs",
.mount  = btrfs_mount,
.kill_sb= btrfs_kill_super,
-   .fs_flags   = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA,
+   .fs_flags   = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_MGTIME,
 };
 
 static struct file_system_type btrfs_root_fs_type = {
@@ -2152,7 +2152,8 @@ static struct file_system_type btrfs_root_fs_type = {
.name   = "btrfs",
.mount  = btrfs_mount_root,
.kill_sb= btrfs_kill_super,
-   .fs_flags   = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | 
FS_ALLOW_IDMAP,
+   .fs_flags   = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA |
+ FS_ALLOW_IDMAP | FS_MGTIME,
 };
 
 MODULE_ALIAS_FS("btrfs");

-- 
2.41.0



[Cluster-devel] [PATCH v7 02/13] fs: pass the request_mask to generic_fillattr

2023-08-07 Thread Jeff Layton
generic_fillattr just fills in the entire stat struct indiscriminately
today, copying data from the inode. There is at least one attribute
(STATX_CHANGE_COOKIE) that can have side effects when it is reported,
and we're looking at adding more with the addition of multigrain
timestamps.

Add a request_mask argument to generic_fillattr and have most callers
just pass in the value that is passed to getattr. Have other callers
(e.g. ksmbd) just pass in STATX_BASIC_STATS. Also move the setting of
STATX_CHANGE_COOKIE into generic_fillattr.
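
The resulting prototype, as implied by the converted call sites below
(paraphrased here rather than quoted from the patch):

	void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
			      struct inode *inode, struct kstat *stat);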

Acked-by: Joseph Qi 
Reviewed-by: Xiubo Li 
Reviewed-by: "Paulo Alcantara (SUSE)" 
Reviewed-by: Jan Kara 
Signed-off-by: Jeff Layton 
---
 fs/9p/vfs_inode.c   |  4 ++--
 fs/9p/vfs_inode_dotl.c  |  4 ++--
 fs/afs/inode.c  |  2 +-
 fs/btrfs/inode.c|  2 +-
 fs/ceph/inode.c |  2 +-
 fs/coda/inode.c |  3 ++-
 fs/ecryptfs/inode.c |  5 +++--
 fs/erofs/inode.c|  2 +-
 fs/exfat/file.c |  2 +-
 fs/ext2/inode.c |  2 +-
 fs/ext4/inode.c |  2 +-
 fs/f2fs/file.c  |  2 +-
 fs/fat/file.c   |  2 +-
 fs/fuse/dir.c   |  2 +-
 fs/gfs2/inode.c |  2 +-
 fs/hfsplus/inode.c  |  2 +-
 fs/kernfs/inode.c   |  2 +-
 fs/libfs.c  |  4 ++--
 fs/minix/inode.c|  2 +-
 fs/nfs/inode.c  |  2 +-
 fs/nfs/namespace.c  |  3 ++-
 fs/ntfs3/file.c |  2 +-
 fs/ocfs2/file.c |  2 +-
 fs/orangefs/inode.c |  2 +-
 fs/proc/base.c  |  4 ++--
 fs/proc/fd.c|  2 +-
 fs/proc/generic.c   |  2 +-
 fs/proc/proc_net.c  |  2 +-
 fs/proc/proc_sysctl.c   |  2 +-
 fs/proc/root.c  |  3 ++-
 fs/smb/client/inode.c   |  2 +-
 fs/smb/server/smb2pdu.c | 22 +++---
 fs/smb/server/vfs.c |  3 ++-
 fs/stat.c   | 24 +---
 fs/sysv/itree.c |  3 ++-
 fs/ubifs/dir.c  |  2 +-
 fs/udf/symlink.c|  2 +-
 fs/vboxsf/utils.c   |  2 +-
 include/linux/fs.h  |  2 +-
 mm/shmem.c  |  2 +-
 40 files changed, 73 insertions(+), 65 deletions(-)

diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 16d85e6033a3..d24d1f20e922 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1016,7 +1016,7 @@ v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct 
path *path,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
-   generic_fillattr(&nop_mnt_idmap, inode, stat);
+   generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
} else if (v9ses->cache & CACHE_WRITEBACK) {
if (S_ISREG(inode->i_mode)) {
@@ -1037,7 +1037,7 @@ v9fs_vfs_getattr(struct mnt_idmap *idmap, const struct 
path *path,
return PTR_ERR(st);
 
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
-   generic_fillattr(_mnt_idmap, d_inode(dentry), stat);
+   generic_fillattr(_mnt_idmap, request_mask, d_inode(dentry), stat);
 
p9stat_free(st);
kfree(st);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 464ea73d1bf8..8e8d5d2a13d8 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -451,7 +451,7 @@ v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
v9ses = v9fs_dentry2v9ses(dentry);
if (v9ses->cache & (CACHE_META|CACHE_LOOSE)) {
-   generic_fillattr(&nop_mnt_idmap, inode, stat);
+   generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
return 0;
} else if (v9ses->cache) {
if (S_ISREG(inode->i_mode)) {
@@ -476,7 +476,7 @@ v9fs_vfs_getattr_dotl(struct mnt_idmap *idmap,
return PTR_ERR(st);
 
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
-   generic_fillattr(&nop_mnt_idmap, d_inode(dentry), stat);
+   generic_fillattr(&nop_mnt_idmap, request_mask, d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
 
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 6b636f43f548..1c794a1896aa 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -773,7 +773,7 @@ int afs_getattr(struct mnt_idmap *idmap, const struct path 
*path,
 
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
-   generic_fillattr(&nop_mnt_idmap, inode, stat);
+   generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
if (test_bit(AFS_VNODE_SILLY_DELETED, >flags) &&
stat->nlink > 0)
stat->nlink -= 1;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ceac62c1cbfc..29a20f828dda 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8746,7 +8746,7 @@ static int btrfs_getattr(struct mnt_idmap *idmap,
  STATX_ATTR_IMMUTABLE |
  

[Cluster-devel] [PATCH v7 08/13] fs: drop the timespec64 argument from update_time

2023-08-07 Thread Jeff Layton
Now that all of the update_time operations are prepared for it, we can
drop the timespec64 argument from the update_time operation. Do that and
remove it from some associated functions like inode_update_time and
inode_needs_update_time.

Signed-off-by: Jeff Layton 
---
 fs/bad_inode.c   |  3 +--
 fs/btrfs/inode.c |  3 +--
 fs/btrfs/volumes.c   |  4 +---
 fs/fat/fat.h |  3 +--
 fs/fat/misc.c|  2 +-
 fs/gfs2/inode.c  |  3 +--
 fs/inode.c   | 30 +-
 fs/overlayfs/inode.c |  2 +-
 fs/overlayfs/overlayfs.h |  2 +-
 fs/ubifs/file.c  |  3 +--
 fs/ubifs/ubifs.h |  2 +-
 fs/xfs/xfs_iops.c|  1 -
 include/linux/fs.h   |  4 ++--
 13 files changed, 25 insertions(+), 37 deletions(-)

diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 6e21f7412a85..83f9566c973b 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -133,8 +133,7 @@ static int bad_inode_fiemap(struct inode *inode,
return -EIO;
 }
 
-static int bad_inode_update_time(struct inode *inode, struct timespec64 *time,
-int flags)
+static int bad_inode_update_time(struct inode *inode, int flags)
 {
return -EIO;
 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d52e7d64570a..0964c66411a1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6059,8 +6059,7 @@ static int btrfs_dirty_inode(struct btrfs_inode *inode)
  * This is a copy of file_update_time.  We need this so we can return error on
  * ENOSPC for updating the inode in the case of file write and mmap writes.
  */
-static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
-int flags)
+static int btrfs_update_time(struct inode *inode, int flags)
 {
struct btrfs_root *root = BTRFS_I(inode)->root;
bool dirty = flags & ~S_VERSION;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 73f9ea7672db..264c71590370 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1917,15 +1917,13 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle 
*trans,
 static void update_dev_time(const char *device_path)
 {
struct path path;
-   struct timespec64 now;
int ret;
 
ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
if (ret)
return;
 
-   now = current_time(d_inode(path.dentry));
-   inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME | S_VERSION);
+   inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION);
path_put(&path);
 }
 
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index e3b690b48e3e..66cf4778cf3b 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -460,8 +460,7 @@ extern struct timespec64 fat_truncate_mtime(const struct 
msdos_sb_info *sbi,
const struct timespec64 *ts);
 extern int fat_truncate_time(struct inode *inode, struct timespec64 *now,
 int flags);
-extern int fat_update_time(struct inode *inode, struct timespec64 *now,
-  int flags);
+extern int fat_update_time(struct inode *inode, int flags);
 extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
 
 int fat_cache_init(void);
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 8cab87145d63..080a5035483f 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -339,7 +339,7 @@ int fat_truncate_time(struct inode *inode, struct 
timespec64 *now, int flags)
 }
 EXPORT_SYMBOL_GPL(fat_truncate_time);
 
-int fat_update_time(struct inode *inode, struct timespec64 *now, int flags)
+int fat_update_time(struct inode *inode, int flags)
 {
int dirty_flags = 0;
 
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index f1f04557aa21..a21ac41d6669 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -2139,8 +2139,7 @@ loff_t gfs2_seek_hole(struct file *file, loff_t offset)
return vfs_setpos(file, ret, inode->i_sb->s_maxbytes);
 }
 
-static int gfs2_update_time(struct inode *inode, struct timespec64 *time,
-   int flags)
+static int gfs2_update_time(struct inode *inode, int flags)
 {
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *gl = ip->i_gl;
diff --git a/fs/inode.c b/fs/inode.c
index e07e45f6cd01..e50d94a136fe 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1958,10 +1958,10 @@ EXPORT_SYMBOL(generic_update_time);
  * This does the actual work of updating an inodes time or version.  Must have
  * had called mnt_want_write() before calling this.
  */
-int inode_update_time(struct inode *inode, struct timespec64 *time, int flags)
+int inode_update_time(struct inode *inode, int flags)
 {
if (inode->i_op->update_time)
-   return inode->i_op->update_time(inode, time, flags);
+   return inode->i_op->update_time(inode, flags);
generic_update_time(inode, flags);
return 0;
 }
@@ -2015,7 +2015,6 @@ void touch_atime(const struct path *path)
 {

[Cluster-devel] [PATCH v7 03/13] fs: drop the timespec64 arg from generic_update_time

2023-08-07 Thread Jeff Layton
In future patches we're going to change how the ctime is updated
to keep track of when it has been queried. The way that the update_time
operation works (and a lot of its callers) make this difficult, since
they grab a timestamp early and then pass it down to eventually be
copied into the inode.

All of the existing update_time callers pass in the result of
current_time() in some fashion. Drop the "time" parameter from
generic_update_time, and rework it to fetch its own timestamp.

This change means that an update_time could fetch a different timestamp
than was seen in inode_needs_update_time. update_time is only ever
called with one of two flag combinations: Either S_ATIME is set, or
S_MTIME|S_CTIME|S_VERSION are set.

With this change we now treat the flags argument as an indicator that
some value needed to be updated when last checked, rather than an
indication to update specific timestamps.

Rework the logic for updating the timestamps and put it in a new
inode_update_timestamps helper that other update_time routines can use.
S_ATIME is as treated as we always have, but if any of the other three
are set, then we attempt to update all three.

Also, some callers of generic_update_time need to know what timestamps
were actually updated. Change it to return an S_* flag mask to indicate
that and rework the callers to expect it.
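
A converted filesystem's update_time can then collapse to something like
this sketch (using the two-argument form a later patch in this series
settles on; the foofs_* names are placeholders, compare the btrfs
conversion later in the series):

	static int foofs_update_time(struct inode *inode, int flags)
	{
		/* Fetches timestamps itself; returns the S_* flags it changed. */
		int dirty = inode_update_timestamps(inode, flags);

		return dirty ? foofs_dirty_inode(inode) : 0;
	}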

Signed-off-by: Jeff Layton 
---
 fs/gfs2/inode.c |  3 +-
 fs/inode.c  | 84 +
 fs/orangefs/inode.c |  3 +-
 fs/ubifs/file.c |  6 ++--
 fs/xfs/xfs_iops.c   |  6 ++--
 include/linux/fs.h  |  3 +-
 6 files changed, 80 insertions(+), 25 deletions(-)

diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 200cabf3b393..f1f04557aa21 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -2155,7 +2155,8 @@ static int gfs2_update_time(struct inode *inode, struct 
timespec64 *time,
if (error)
return error;
}
-   return generic_update_time(inode, time, flags);
+   generic_update_time(inode, flags);
+   return 0;
 }
 
 static const struct inode_operations gfs2_file_iops = {
diff --git a/fs/inode.c b/fs/inode.c
index 3fc251bfaf73..e07e45f6cd01 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1881,29 +1881,76 @@ static int relatime_need_update(struct vfsmount *mnt, 
struct inode *inode,
return 0;
 }
 
-int generic_update_time(struct inode *inode, struct timespec64 *time, int 
flags)
+/**
+ * inode_update_timestamps - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that needed to be updated
+ *
+ * The update_time function is called when an inode's timestamps need to be
+ * updated for a read or write operation. This function handles updating the
+ * actual timestamps. It's up to the caller to ensure that the inode is marked
+ * dirty appropriately.
+ *
+ * In the case where any of S_MTIME, S_CTIME, or S_VERSION need to be updated,
+ * attempt to update all three of them. S_ATIME updates can be handled
+ * independently of the rest.
+ *
+ * Returns a set of S_* flags indicating which values changed.
+ */
+int inode_update_timestamps(struct inode *inode, int flags)
 {
-   int dirty_flags = 0;
+   int updated = 0;
+   struct timespec64 now;
+
+   if (flags & (S_MTIME|S_CTIME|S_VERSION)) {
+   struct timespec64 ctime = inode_get_ctime(inode);
 
-   if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
-   if (flags & S_ATIME)
-   inode->i_atime = *time;
-   if (flags & S_CTIME)
-   inode_set_ctime_to_ts(inode, *time);
-   if (flags & S_MTIME)
-   inode->i_mtime = *time;
-
-   if (inode->i_sb->s_flags & SB_LAZYTIME)
-   dirty_flags |= I_DIRTY_TIME;
-   else
-   dirty_flags |= I_DIRTY_SYNC;
+   now = inode_set_ctime_current(inode);
+   if (!timespec64_equal(&now, &ctime))
+   updated |= S_CTIME;
+   if (!timespec64_equal(&now, &inode->i_mtime)) {
+   inode->i_mtime = now;
+   updated |= S_MTIME;
+   }
+   if (IS_I_VERSION(inode) && inode_maybe_inc_iversion(inode, updated))
+   updated |= S_VERSION;
+   } else {
+   now = current_time(inode);
}
 
-   if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
-   dirty_flags |= I_DIRTY_SYNC;
+   if (flags & S_ATIME) {
+   if (!timespec64_equal(&now, &inode->i_atime)) {
+   inode->i_atime = now;
+   updated |= S_ATIME;
+   }
+   }
+   return updated;
+}
+EXPORT_SYMBOL(inode_update_timestamps);
+
+/**
+ * generic_update_time - update the timestamps on the inode
+ * @inode: inode to be updated
+ * @flags: S_* flags that needed to be updated
+ *
+ * The update_time function is 

[Cluster-devel] [PATCH v7 07/13] xfs: have xfs_vn_update_time gets its own timestamp

2023-08-07 Thread Jeff Layton
In later patches we're going to drop the "now" parameter from the
update_time operation. Prepare XFS for this by reworking how it fetches
timestamps and sets them in the inode. Ensure that we update the ctime
even if only S_MTIME is set.

Signed-off-by: Jeff Layton 
---
 fs/xfs/xfs_iops.c | 12 
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 731f45391baa..72d18e7840f5 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -1037,6 +1037,7 @@ xfs_vn_update_time(
int log_flags = XFS_ILOG_TIMESTAMP;
struct xfs_trans*tp;
int error;
+   struct timespec64   now = current_time(inode);
 
trace_xfs_update_time(ip);
 
@@ -1056,12 +1057,15 @@ xfs_vn_update_time(
return error;
 
xfs_ilock(ip, XFS_ILOCK_EXCL);
-   if (flags & S_CTIME)
-   inode_set_ctime_to_ts(inode, *now);
+   if (flags & (S_CTIME|S_MTIME))
+   now = inode_set_ctime_current(inode);
+   else
+   now = current_time(inode);
+
if (flags & S_MTIME)
-   inode->i_mtime = *now;
+   inode->i_mtime = now;
if (flags & S_ATIME)
-   inode->i_atime = *now;
+   inode->i_atime = now;
 
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_log_inode(tp, ip, log_flags);

-- 
2.41.0



[Cluster-devel] [PATCH v7 10/13] tmpfs: add support for multigrain timestamps

2023-08-07 Thread Jeff Layton
Enable multigrain timestamps, which should ensure that there is an
apparent change to the timestamp whenever it has been written after
being actively observed via getattr.

tmpfs only requires the FS_MGTIME flag.

Reviewed-by: Jan Kara 
Signed-off-by: Jeff Layton 
---
 mm/shmem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 142ead70e8c1..98cc4be7a8a8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4220,7 +4220,7 @@ static struct file_system_type shmem_fs_type = {
 #endif
.kill_sb= kill_litter_super,
 #ifdef CONFIG_SHMEM
-   .fs_flags   = FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
+   .fs_flags   = FS_USERNS_MOUNT | FS_ALLOW_IDMAP | FS_MGTIME,
 #else
.fs_flags   = FS_USERNS_MOUNT,
 #endif

-- 
2.41.0



[Cluster-devel] [PATCH v7 04/13] btrfs: have it use inode_update_timestamps

2023-08-07 Thread Jeff Layton
In later patches, we're going to drop the "now" argument from the
update_time operation. Have btrfs_update_time use the new
inode_update_timestamps helper to fetch a new timestamp and update it
properly.

Signed-off-by: Jeff Layton 
---
 fs/btrfs/inode.c | 9 +
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 29a20f828dda..d52e7d64570a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6068,14 +6068,7 @@ static int btrfs_update_time(struct inode *inode, struct 
timespec64 *now,
if (btrfs_root_readonly(root))
return -EROFS;
 
-   if (flags & S_VERSION)
-   dirty |= inode_maybe_inc_iversion(inode, dirty);
-   if (flags & S_CTIME)
-   inode_set_ctime_to_ts(inode, *now);
-   if (flags & S_MTIME)
-   inode->i_mtime = *now;
-   if (flags & S_ATIME)
-   inode->i_atime = *now;
+   dirty = inode_update_timestamps(inode, flags);
return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
 }
 

-- 
2.41.0



[Cluster-devel] [PATCH v7 05/13] fat: make fat_update_time get its own timestamp

2023-08-07 Thread Jeff Layton
In later patches, we're going to drop the "now" parameter from the
update_time operation. Fix fat_update_time to fetch its own timestamp.
It turns out that this is easily done by just passing a NULL timestamp
pointer to fat_update_time.

Also, it may be that things have changed by the time we get to calling
fat_update_time after checking inode_needs_update_time. Ensure that we
attempt the i_version bump if any of the S_* flags besides S_ATIME are
set.

Signed-off-by: Jeff Layton 
---
 fs/fat/misc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 67006ea08db6..8cab87145d63 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -347,14 +347,14 @@ int fat_update_time(struct inode *inode, struct 
timespec64 *now, int flags)
return 0;
 
if (flags & (S_ATIME | S_CTIME | S_MTIME)) {
-   fat_truncate_time(inode, now, flags);
+   fat_truncate_time(inode, NULL, flags);
if (inode->i_sb->s_flags & SB_LAZYTIME)
dirty_flags |= I_DIRTY_TIME;
else
dirty_flags |= I_DIRTY_SYNC;
}
 
-   if ((flags & S_VERSION) && inode_maybe_inc_iversion(inode, false))
+   if ((flags & (S_VERSION|S_CTIME|S_MTIME)) && 
inode_maybe_inc_iversion(inode, false))
dirty_flags |= I_DIRTY_SYNC;
 
__mark_inode_dirty(inode, dirty_flags);

-- 
2.41.0



[Cluster-devel] [PATCH v7 11/13] xfs: switch to multigrain timestamps

2023-08-07 Thread Jeff Layton
Enable multigrain timestamps, which should ensure that there is an
apparent change to the timestamp whenever it has been written after
being actively observed via getattr.

Also, anytime the mtime changes, the ctime must also change, and those
are now the only two options for xfs_trans_ichgtime. Have that function
unconditionally bump the ctime, and ASSERT that XFS_ICHGTIME_CHG is
always set.

Acked-by: "Darrick J. Wong" 
Signed-off-by: Jeff Layton 
---
 fs/xfs/libxfs/xfs_trans_inode.c | 6 +++---
 fs/xfs/xfs_iops.c   | 8 
 fs/xfs/xfs_super.c  | 2 +-
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 6b2296ff248a..ad22656376d3 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -62,12 +62,12 @@ xfs_trans_ichgtime(
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-   tv = current_time(inode);
+   /* If the mtime changes, then ctime must also change */
+   ASSERT(flags & XFS_ICHGTIME_CHG);
 
+   tv = inode_set_ctime_current(inode);
if (flags & XFS_ICHGTIME_MOD)
inode->i_mtime = tv;
-   if (flags & XFS_ICHGTIME_CHG)
-   inode_set_ctime_to_ts(inode, tv);
if (flags & XFS_ICHGTIME_CREATE)
ip->i_crtime = tv;
 }
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c73529f77bac..2ededd3f6b8c 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -573,10 +573,10 @@ xfs_vn_getattr(
stat->gid = vfsgid_into_kgid(vfsgid);
stat->ino = ip->i_ino;
stat->atime = inode->i_atime;
-   stat->mtime = inode->i_mtime;
-   stat->ctime = inode_get_ctime(inode);
stat->blocks = XFS_FSB_TO_BB(mp, ip->i_nblocks + ip->i_delayed_blks);
 
+   fill_mg_cmtime(stat, request_mask, inode);
+
if (xfs_has_v3inodes(mp)) {
if (request_mask & STATX_BTIME) {
stat->result_mask |= STATX_BTIME;
@@ -917,7 +917,7 @@ xfs_setattr_size(
if (newsize != oldsize &&
!(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
iattr->ia_ctime = iattr->ia_mtime =
-   current_time(inode);
+   current_mgtime(inode);
iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
}
 
@@ -1036,7 +1036,7 @@ xfs_vn_update_time(
int log_flags = XFS_ILOG_TIMESTAMP;
struct xfs_trans*tp;
int error;
-   struct timespec64   now = current_time(inode);
+   struct timespec64   now;
 
trace_xfs_update_time(ip);
 
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 818510243130..4b10edb2c972 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -2009,7 +2009,7 @@ static struct file_system_type xfs_fs_type = {
.init_fs_context= xfs_init_fs_context,
.parameters = xfs_fs_parameters,
.kill_sb= kill_block_super,
-   .fs_flags   = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+   .fs_flags   = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
 };
 MODULE_ALIAS_FS("xfs");
 

-- 
2.41.0



[Cluster-devel] [PATCH v7 06/13] ubifs: have ubifs_update_time use inode_update_timestamps

2023-08-07 Thread Jeff Layton
In later patches, we're going to drop the "now" parameter from the
update_time operation. Prepare ubifs for this, by having it use the new
inode_update_timestamps helper.

Signed-off-by: Jeff Layton 
---
 fs/ubifs/file.c | 10 ++
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index df9086b19cd0..2d0178922e19 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1397,15 +1397,9 @@ int ubifs_update_time(struct inode *inode, struct 
timespec64 *time,
return err;
 
mutex_lock(&ui->ui_mutex);
-   if (flags & S_ATIME)
-   inode->i_atime = *time;
-   if (flags & S_CTIME)
-   inode_set_ctime_to_ts(inode, *time);
-   if (flags & S_MTIME)
-   inode->i_mtime = *time;
-
-   release = ui->dirty;
+   inode_update_timestamps(inode, flags);
__mark_inode_dirty(inode, I_DIRTY_SYNC);
+   release = ui->dirty;
mutex_unlock(&ui->ui_mutex);
if (release)
ubifs_release_budget(c, &req);

-- 
2.41.0



[Cluster-devel] [PATCH v7 00/13] fs: implement multigrain timestamps

2023-08-07 Thread Jeff Layton
The VFS always uses coarse-grained timestamps when updating the
ctime and mtime after a change. This has the benefit of allowing
filesystems to optimize away a lot of metadata updates, down to around 1
per jiffy, even when a file is under heavy writes.

Unfortunately, this coarseness has always been an issue when we're
exporting via NFSv3, which relies on timestamps to validate caches. A
lot of changes can happen in a jiffy, so timestamps aren't sufficient to
help the client decide to invalidate the cache.

Even with NFSv4, a lot of exported filesystems don't properly support a
change attribute and are subject to the same problems with timestamp
granularity. Other applications have similar issues with timestamps (e.g.
backup applications).

If we were to always use fine-grained timestamps, that would improve the
situation, but that becomes rather expensive, as the underlying
filesystem would have to log a lot more metadata updates.

What we need is a way to only use fine-grained timestamps when they are
being actively queried. The idea is to use an unused bit in the ctime's
tv_nsec field to mark when the mtime or ctime has been queried via
getattr. Once that has been marked, the next m/ctime update will use a
fine-grained timestamp.

Credit goes to Dave Chinner for the original idea, and to Ben Coddington
for the catchy name. This series should apply cleanly onto Christian's
vfs.ctime branch, once the v6 mgtime patches have been dropped. That
should be everything above this commit:

525deaeb2fbf gfs2: fix timestamp handling on quota inodes

base-commit: cf22d118b89a09a0160586412160d89098f7c4c7
Signed-off-by: Jeff Layton 
---
Changes in v7:
- change update_time operation to fetch the current time itself
- don't modify the current_time operation; leave it always returning a coarse
  timestamp
- rework inode_set_ctime_current for better atomicity and ensure that
  all mgtime filesystems use it
- reorder arguments to fill_mg_cmtime

Changes in v6:
- drop the patch that removed XFS_ICHGTIME_CHG
- change WARN_ON_ONCE to ASSERT in xfs conversion patch

---
Jeff Layton (13):
  fs: remove silly warning from current_time
  fs: pass the request_mask to generic_fillattr
  fs: drop the timespec64 arg from generic_update_time
  btrfs: have it use inode_update_timestamps
  fat: make fat_update_time get its own timestamp
  ubifs: have ubifs_update_time use inode_update_timestamps
  xfs: have xfs_vn_update_time gets its own timestamp
  fs: drop the timespec64 argument from update_time
  fs: add infrastructure for multigrain timestamps
  tmpfs: add support for multigrain timestamps
  xfs: switch to multigrain timestamps
  ext4: switch to multigrain timestamps
  btrfs: convert to multigrain timestamps

 fs/9p/vfs_inode.c   |   4 +-
 fs/9p/vfs_inode_dotl.c  |   4 +-
 fs/afs/inode.c  |   2 +-
 fs/bad_inode.c  |   3 +-
 fs/btrfs/file.c |  24 +
 fs/btrfs/inode.c|  14 +--
 fs/btrfs/super.c|   5 +-
 fs/btrfs/volumes.c  |   4 +-
 fs/ceph/inode.c |   2 +-
 fs/coda/inode.c |   3 +-
 fs/ecryptfs/inode.c |   5 +-
 fs/erofs/inode.c|   2 +-
 fs/exfat/file.c |   2 +-
 fs/ext2/inode.c |   2 +-
 fs/ext4/inode.c |   2 +-
 fs/ext4/super.c |   2 +-
 fs/f2fs/file.c  |   2 +-
 fs/fat/fat.h|   3 +-
 fs/fat/file.c   |   2 +-
 fs/fat/misc.c   |   6 +-
 fs/fuse/dir.c   |   2 +-
 fs/gfs2/inode.c |   8 +-
 fs/hfsplus/inode.c  |   2 +-
 fs/inode.c  | 200 +++-
 fs/kernfs/inode.c   |   2 +-
 fs/libfs.c  |   4 +-
 fs/minix/inode.c|   2 +-
 fs/nfs/inode.c  |   2 +-
 fs/nfs/namespace.c  |   3 +-
 fs/ntfs3/file.c |   2 +-
 fs/ocfs2/file.c |   2 +-
 fs/orangefs/inode.c |   5 +-
 fs/overlayfs/inode.c|   2 +-
 fs/overlayfs/overlayfs.h|   2 +-
 fs/proc/base.c  |   4 +-
 fs/proc/fd.c|   2 +-
 fs/proc/generic.c   |   2 +-
 fs/proc/proc_net.c  |   2 +-
 fs/proc/proc_sysctl.c   |   2 +-
 fs/proc/root.c  |   3 +-
 fs/smb/client/inode.c   |   2 +-
 fs/smb/server/smb2pdu.c |  22 ++---
 fs/smb/server/vfs.c |   3 +-
 fs/stat.c   |  65 ++---
 fs/sysv/itree.c |   3 +-
 fs/ubifs/dir.c  |   2 +-
 fs/ubifs/file.c |  19 ++--
 fs/ubifs/ubifs.h|   2 +-
 fs/udf/symlink.c|   2 +-
 fs/vboxsf/utils.c   |   2 +-
 fs/xfs/libxfs/xfs_trans_inode.c |   6 +-
 fs/xfs/xfs_iops.c   

[Cluster-devel] [PATCH v7 01/13] fs: remove silly warning from current_time

2023-08-07 Thread Jeff Layton
An inode with no superblock? Unpossible!

Signed-off-by: Jeff Layton 
---
 fs/inode.c | 6 --
 1 file changed, 6 deletions(-)

diff --git a/fs/inode.c b/fs/inode.c
index d4ab92233062..3fc251bfaf73 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2495,12 +2495,6 @@ struct timespec64 current_time(struct inode *inode)
struct timespec64 now;
 
ktime_get_coarse_real_ts64(&now);
-
-   if (unlikely(!inode->i_sb)) {
-   WARN(1, "current_time() called with uninitialized super_block 
in the inode");
-   return now;
-   }
-
return timestamp_truncate(now, inode);
 }
 EXPORT_SYMBOL(current_time);

-- 
2.41.0



Re: [Cluster-devel] [PATCH v4 19/48] rcu: dynamically allocate the rcu-kfree shrinker

2023-08-07 Thread Joel Fernandes
On Mon, Aug 7, 2023 at 7:17 AM Qi Zheng  wrote:
>
> Use new APIs to dynamically allocate the rcu-kfree shrinker.
>
> Signed-off-by: Qi Zheng 

For RCU:
Reviewed-by: Joel Fernandes (Google) 

thanks,

- Joel


> ---
>  kernel/rcu/tree.c | 22 +-
>  1 file changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 7c79480bfaa0..3b20fc46c514 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3449,13 +3449,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct 
> shrink_control *sc)
> return freed == 0 ? SHRINK_STOP : freed;
>  }
>
> -static struct shrinker kfree_rcu_shrinker = {
> -   .count_objects = kfree_rcu_shrink_count,
> -   .scan_objects = kfree_rcu_shrink_scan,
> -   .batch = 0,
> -   .seeks = DEFAULT_SEEKS,
> -};
> -
>  void __init kfree_rcu_scheduler_running(void)
>  {
> int cpu;
> @@ -4931,6 +4924,7 @@ static void __init kfree_rcu_batch_init(void)
>  {
> int cpu;
> int i, j;
> +   struct shrinker *kfree_rcu_shrinker;
>
> /* Clamp it to [0:100] seconds interval. */
> if (rcu_delay_page_cache_fill_msec < 0 ||
> @@ -4962,8 +4956,18 @@ static void __init kfree_rcu_batch_init(void)
> INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
> krcp->initialized = true;
> }
> -   if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
> -   pr_err("Failed to register kfree_rcu() shrinker!\n");
> +
> +   kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
> +   if (!kfree_rcu_shrinker) {
> +   pr_err("Failed to allocate kfree_rcu() shrinker!\n");
> +   return;
> +   }
> +
> +   kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
> +   kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
> +   kfree_rcu_shrinker->seeks = DEFAULT_SEEKS;
> +
> +   shrinker_register(kfree_rcu_shrinker);
>  }
>
>  void __init rcu_init(void)
> --
> 2.30.2
>



Re: [Cluster-devel] [PATCH v4 18/48] rcu: dynamically allocate the rcu-lazy shrinker

2023-08-07 Thread Joel Fernandes
On Mon, Aug 7, 2023 at 7:36 AM Qi Zheng  wrote:
>
> Use new APIs to dynamically allocate the rcu-lazy shrinker.
>
> Signed-off-by: Qi Zheng 

For RCU:
Reviewed-by: Joel Fernandes (Google) 

thanks,

- Joel


> ---
>  kernel/rcu/tree_nocb.h | 20 +++-
>  1 file changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
> index 5598212d1f27..e1c59c33738a 100644
> --- a/kernel/rcu/tree_nocb.h
> +++ b/kernel/rcu/tree_nocb.h
> @@ -1396,13 +1396,6 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct 
> shrink_control *sc)
>
> return count ? count : SHRINK_STOP;
>  }
> -
> -static struct shrinker lazy_rcu_shrinker = {
> -   .count_objects = lazy_rcu_shrink_count,
> -   .scan_objects = lazy_rcu_shrink_scan,
> -   .batch = 0,
> -   .seeks = DEFAULT_SEEKS,
> -};
>  #endif // #ifdef CONFIG_RCU_LAZY
>
>  void __init rcu_init_nohz(void)
> @@ -1410,6 +1403,7 @@ void __init rcu_init_nohz(void)
> int cpu;
> struct rcu_data *rdp;
> const struct cpumask *cpumask = NULL;
> +   struct shrinker * __maybe_unused lazy_rcu_shrinker;
>
>  #if defined(CONFIG_NO_HZ_FULL)
> if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
> @@ -1436,8 +1430,16 @@ void __init rcu_init_nohz(void)
> return;
>
>  #ifdef CONFIG_RCU_LAZY
> -   if (register_shrinker(&lazy_rcu_shrinker, "rcu-lazy"))
> -   pr_err("Failed to register lazy_rcu shrinker!\n");
> +   lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
> +   if (!lazy_rcu_shrinker) {
> +   pr_err("Failed to allocate lazy_rcu shrinker!\n");
> +   } else {
> +   lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
> +   lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;
> +   lazy_rcu_shrinker->seeks = DEFAULT_SEEKS;
> +
> +   shrinker_register(lazy_rcu_shrinker);
> +   }
>  #endif // #ifdef CONFIG_RCU_LAZY
>
> if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
> --
> 2.30.2
>



[Cluster-devel] [PATCH v4 48/48] mm: shrinker: convert shrinker_rwsem to mutex

2023-08-07 Thread Qi Zheng
Now there are no readers of shrinker_rwsem, so we can simply replace it
with a mutex.

Signed-off-by: Qi Zheng 
---
 drivers/md/dm-cache-metadata.c |  2 +-
 fs/super.c |  2 +-
 mm/shrinker.c  | 28 ++--
 mm/shrinker_debug.c| 14 +++---
 4 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index acffed750e3e..9e0c69958587 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
 * Replacement block manager (new_bm) is created and old_bm destroyed 
outside of
 * cmd root_lock to avoid ABBA deadlock that would result (due to 
life-cycle of
 * shrinker associated with the block manager's bufio client vs cmd 
root_lock).
-* - must take shrinker_rwsem without holding cmd->root_lock
+* - must take shrinker_mutex without holding cmd->root_lock
 */
new_bm = dm_block_manager_create(cmd->bdev, 
DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
 CACHE_MAX_CONCURRENT_LOCKS);
diff --git a/fs/super.c b/fs/super.c
index a28193045345..60c2d290c754 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -54,7 +54,7 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
  * One thing we have to be careful of with a per-sb shrinker is that we don't
  * drop the last active reference to the superblock from within the shrinker.
  * If that happens we could trigger unregistering the shrinker from within the
- * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
+ * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
  * take a passive reference to the superblock to avoid this from occurring.
  */
 static unsigned long super_cache_scan(struct shrinker *shrink,
diff --git a/mm/shrinker.c b/mm/shrinker.c
index a12dede5d21f..3d44a335ef3c 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -8,7 +8,7 @@
 #include "internal.h"
 
 LIST_HEAD(shrinker_list);
-DECLARE_RWSEM(shrinker_rwsem);
+DEFINE_MUTEX(shrinker_mutex);
 
 #ifdef CONFIG_MEMCG
 static int shrinker_nr_max;
@@ -80,7 +80,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
int nid, ret = 0;
int array_size = 0;
 
-   down_write(&shrinker_rwsem);
+   mutex_lock(&shrinker_mutex);
array_size = shrinker_unit_size(shrinker_nr_max);
for_each_node(nid) {
info = kvzalloc_node(sizeof(*info) + array_size, GFP_KERNEL, 
nid);
@@ -91,7 +91,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
goto err;
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
-   up_write(&shrinker_rwsem);
+   mutex_unlock(&shrinker_mutex);
 
return ret;
 
@@ -104,7 +104,7 @@ static struct shrinker_info *shrinker_info_protected(struct 
mem_cgroup *memcg,
 int nid)
 {
return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
-lockdep_is_held(&shrinker_rwsem));
+lockdep_is_held(&shrinker_mutex));
 }
 
 static struct shrinker_info *shrinker_info_rcu(struct mem_cgroup *memcg,
@@ -161,7 +161,7 @@ static int expand_shrinker_info(int new_id)
if (!root_mem_cgroup)
goto out;
 
-   lockdep_assert_held(&shrinker_rwsem);
+   lockdep_assert_held(&shrinker_mutex);
 
new_size = shrinker_unit_size(new_nr_max);
old_size = shrinker_unit_size(shrinker_nr_max);
@@ -224,7 +224,7 @@ static int shrinker_memcg_alloc(struct shrinker *shrinker)
if (mem_cgroup_disabled())
return -ENOSYS;
 
-   down_write(&shrinker_rwsem);
+   mutex_lock(&shrinker_mutex);
	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
if (id < 0)
goto unlock;
@@ -238,7 +238,7 @@ static int shrinker_memcg_alloc(struct shrinker *shrinker)
shrinker->id = id;
ret = 0;
 unlock:
-   up_write(&shrinker_rwsem);
+   mutex_unlock(&shrinker_mutex);
return ret;
 }
 
@@ -248,7 +248,7 @@ static void shrinker_memcg_remove(struct shrinker *shrinker)
 
BUG_ON(id < 0);
 
-   lockdep_assert_held(&shrinker_rwsem);
+   lockdep_assert_held(&shrinker_mutex);

	idr_remove(&shrinker_idr, id);
 }
@@ -299,7 +299,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
parent = root_mem_cgroup;
 
/* Prevent from concurrent shrinker_info expand */
-   down_write(&shrinker_rwsem);
+   mutex_lock(&shrinker_mutex);
for_each_node(nid) {
child_info = shrinker_info_protected(memcg, nid);
parent_info = shrinker_info_protected(parent, nid);
@@ -312,7 +312,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
}
}
}
-   up_write(&shrinker_rwsem);
+   mutex_unlock(&shrinker_mutex);
 }
 #else
 static int 

[Cluster-devel] [PATCH v4 47/48] mm: shrinker: hold write lock to reparent shrinker nr_deferred

2023-08-07 Thread Qi Zheng
For now, reparent_shrinker_deferred() is the only holder of the read lock
on shrinker_rwsem. It already holds the global cgroup_mutex, so it cannot
run in parallel with itself.

Therefore, in order to convert shrinker_rwsem to shrinker_mutex later,
change it to hold the write lock of shrinker_rwsem while reparenting.

Signed-off-by: Qi Zheng 
---
 mm/shrinker.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/shrinker.c b/mm/shrinker.c
index fee6f62904fb..a12dede5d21f 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -299,7 +299,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
parent = root_mem_cgroup;
 
/* Prevent from concurrent shrinker_info expand */
-   down_read(&shrinker_rwsem);
+   down_write(&shrinker_rwsem);
for_each_node(nid) {
child_info = shrinker_info_protected(memcg, nid);
parent_info = shrinker_info_protected(parent, nid);
@@ -312,7 +312,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
}
}
}
-   up_read(&shrinker_rwsem);
+   up_write(&shrinker_rwsem);
 }
 #else
 static int shrinker_memcg_alloc(struct shrinker *shrinker)
-- 
2.30.2



[Cluster-devel] [PATCH v4 41/48] fs: super: dynamically allocate the s_shrink

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the s_shrink, so that it can be freed asynchronously
using kfree_rcu(). Then we don't need to wait for the RCU read-side critical
section when releasing the struct super_block.

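The conversion pattern is the same across this and the other shrinker
patches in this series; a minimal sketch of the new API usage (not part of
the patch itself; error handling elided, field values taken from the hunks
below):

```
/* Sketch only: allocate, configure, register, and later free. */
struct shrinker *sh;

sh = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
		    "sb-%s", type->name);
if (!sh)
	return -ENOMEM;

sh->count_objects = super_cache_count;	/* same callbacks as before */
sh->scan_objects = super_cache_scan;
sh->seeks = DEFAULT_SEEKS;
sh->batch = 1024;
sh->private_data = s;			/* replaces container_of() */

shrinker_register(sh);			/* make it visible to reclaim */

/* ... and on teardown: */
shrinker_free(sh);			/* unregister, then free via RCU */
```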
Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/btrfs/super.c   |  2 +-
 fs/kernfs/mount.c  |  2 +-
 fs/proc/root.c |  2 +-
 fs/super.c | 34 +++---
 include/linux/fs.h |  2 +-
 5 files changed, 23 insertions(+), 19 deletions(-)

diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 5ca07b41b4cd..c6980fc6fe02 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1514,7 +1514,7 @@ static struct dentry *btrfs_mount_root(struct 
file_system_type *fs_type,
 
snprintf(s->s_id, sizeof(s->s_id), "%pg",
 fs_devices->latest_dev->bdev);
-   shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
+   shrinker_debugfs_rename(s->s_shrink, "sb-%s:%s", fs_type->name,
s->s_id);
fs_info->bdev_holder = s;
error = btrfs_fill_super(s, fs_devices, data);
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index c4bf26142eec..79b96e74a8a0 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -265,7 +265,7 @@ static int kernfs_fill_super(struct super_block *sb, struct 
kernfs_fs_context *k
sb->s_time_gran = 1;
 
/* sysfs dentries and inodes don't require IO to create */
-   sb->s_shrink.seeks = 0;
+   sb->s_shrink->seeks = 0;
 
/* get root inode, initialize and unlock it */
	down_read(&kf_root->kernfs_rwsem);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 9191248f2dac..b55dbc70287b 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -188,7 +188,7 @@ static int proc_fill_super(struct super_block *s, struct 
fs_context *fc)
s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
 
/* procfs dentries and inodes don't require IO to create */
-   s->s_shrink.seeks = 0;
+   s->s_shrink->seeks = 0;
 
	pde_get(&proc_root);
	root_inode = proc_get_inode(s, &proc_root);
diff --git a/fs/super.c b/fs/super.c
index 2354bcab6fff..a28193045345 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -67,7 +67,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
	long	dentries;
	long	inodes;
 
-   sb = container_of(shrink, struct super_block, s_shrink);
+   sb = shrink->private_data;
 
/*
 * Deadlock avoidance.  We may hold various FS locks, and we don't want
@@ -120,7 +120,7 @@ static unsigned long super_cache_count(struct shrinker 
*shrink,
struct super_block *sb;
	long	total_objects = 0;
 
-   sb = container_of(shrink, struct super_block, s_shrink);
+   sb = shrink->private_data;
 
/*
 * We don't call trylock_super() here as it is a scalability bottleneck,
@@ -182,7 +182,7 @@ static void destroy_unused_super(struct super_block *s)
security_sb_free(s);
put_user_ns(s->s_user_ns);
kfree(s->s_subtype);
-   free_prealloced_shrinker(&s->s_shrink);
+   shrinker_free(s->s_shrink);
/* no delays needed */
destroy_super_work(>destroy_work);
 }
@@ -259,16 +259,20 @@ static struct super_block *alloc_super(struct 
file_system_type *type, int flags,
s->s_time_min = TIME64_MIN;
s->s_time_max = TIME64_MAX;
 
-   s->s_shrink.seeks = DEFAULT_SEEKS;
-   s->s_shrink.scan_objects = super_cache_scan;
-   s->s_shrink.count_objects = super_cache_count;
-   s->s_shrink.batch = 1024;
-   s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
-   if (prealloc_shrinker(&s->s_shrink, "sb-%s", type->name))
+   s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
+"sb-%s", type->name);
+   if (!s->s_shrink)
goto fail;
-   if (list_lru_init_memcg(&s->s_dentry_lru, &s->s_shrink))
+
+   s->s_shrink->seeks = DEFAULT_SEEKS;
+   s->s_shrink->scan_objects = super_cache_scan;
+   s->s_shrink->count_objects = super_cache_count;
+   s->s_shrink->batch = 1024;
+   s->s_shrink->private_data = s;
+
+   if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
goto fail;
-   if (list_lru_init_memcg(&s->s_inode_lru, &s->s_shrink))
+   if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
goto fail;
return s;
 
@@ -326,7 +330,7 @@ void deactivate_locked_super(struct super_block *s)
 {
struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
-   unregister_shrinker(&s->s_shrink);
+   shrinker_free(s->s_shrink);
fs->kill_sb(s);
 
/*
@@ -610,7 +614,7 @@ struct super_block *sget_fc(struct fs_context *fc,
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);

[Cluster-devel] [PATCH v4 46/48] mm: shrinker: make memcg slab shrink lockless

2023-08-07 Thread Qi Zheng
Like the global slab shrink, this commit also uses the refcount+RCU method
to make the memcg slab shrink lockless.

Use the following script to do slab shrink stress test:

```

DIR="/root/shrinker/memcg/mnt"

do_create()
{
mkdir -p /sys/fs/cgroup/memory/test
echo 4G > /sys/fs/cgroup/memory/test/memory.limit_in_bytes
for i in `seq 0 $1`;
do
mkdir -p /sys/fs/cgroup/memory/test/$i;
echo $$ > /sys/fs/cgroup/memory/test/$i/cgroup.procs;
mkdir -p $DIR/$i;
done
}

do_mount()
{
for i in `seq $1 $2`;
do
mount -t tmpfs $i $DIR/$i;
done
}

do_touch()
{
for i in `seq $1 $2`;
do
echo $$ > /sys/fs/cgroup/memory/test/$i/cgroup.procs;
dd if=/dev/zero of=$DIR/$i/file$i bs=1M count=1 &
done
}

case "$1" in
  touch)
do_touch $2 $3
;;
  test)
do_create 4000
do_mount 0 4000
do_touch 0 3000
;;
  *)
exit 1
;;
esac
```

Save the above script, then run the test and touch commands. Then we can use
the following perf command to view hotspots:

perf top -U -F 999

1) Before applying this patchset:

  40.44%  [kernel][k] down_read_trylock
  17.59%  [kernel][k] up_read
  13.64%  [kernel][k] pv_native_safe_halt
  11.90%  [kernel][k] shrink_slab
   8.21%  [kernel][k] idr_find
   2.71%  [kernel][k] _find_next_bit
   1.36%  [kernel][k] shrink_node
   0.81%  [kernel][k] shrink_lruvec
   0.80%  [kernel][k] __radix_tree_lookup
   0.50%  [kernel][k] do_shrink_slab
   0.21%  [kernel][k] list_lru_count_one
   0.16%  [kernel][k] mem_cgroup_iter

2) After applying this patchset:

  60.17%  [kernel]   [k] shrink_slab
  20.42%  [kernel]   [k] pv_native_safe_halt
   3.03%  [kernel]   [k] do_shrink_slab
   2.73%  [kernel]   [k] shrink_node
   2.27%  [kernel]   [k] shrink_lruvec
   2.00%  [kernel]   [k] __rcu_read_unlock
   1.92%  [kernel]   [k] mem_cgroup_iter
   0.98%  [kernel]   [k] __rcu_read_lock
   0.91%  [kernel]   [k] osq_lock
   0.63%  [kernel]   [k] mem_cgroup_calculate_protection
   0.55%  [kernel]   [k] shrinker_put
   0.46%  [kernel]   [k] list_lru_count_one

We can see that the first perf hotspot becomes shrink_slab, which is what
we expect.

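For orientation, the lockless walk that replaces the down_read_trylock()
looks roughly like this (a simplified sketch of the pattern, not the exact
control flow of the patched shrink_slab_memcg()):

```
/* Sketch: RCU protects the lookup, the refcount pins the shrinker. */
rcu_read_lock();
info = shrinker_info_rcu(memcg, nid);
/* ... find a set bit in info->unit[index]->map ... */
shrinker = idr_find(&shrinker_idr, shrinker_id);
if (shrinker && shrinker_try_get(shrinker)) {
	rcu_read_unlock();		/* pinned by the refcount now */
	freed += do_shrink_slab(&sc, shrinker, priority);
	shrinker_put(shrinker);		/* last put wakes shrinker_free() */
	rcu_read_lock();
}
rcu_read_unlock();
```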
Signed-off-by: Qi Zheng 
---
 mm/shrinker.c | 80 ++-
 1 file changed, 54 insertions(+), 26 deletions(-)

diff --git a/mm/shrinker.c b/mm/shrinker.c
index d318f5621862..fee6f62904fb 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -107,6 +107,12 @@ static struct shrinker_info 
*shrinker_info_protected(struct mem_cgroup *memcg,
lockdep_is_held(&shrinker_rwsem));
 }
 
+static struct shrinker_info *shrinker_info_rcu(struct mem_cgroup *memcg,
+  int nid)
+{
+   return rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
+}
+
 static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
int old_size, int new_nr_max)
 {
@@ -198,7 +204,7 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, 
int shrinker_id)
struct shrinker_info_unit *unit;
 
rcu_read_lock();
-   info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
+   info = shrinker_info_rcu(memcg, nid);
unit = info->unit[shriner_id_to_index(shrinker_id)];
if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
/* Pairs with smp mb in shrink_slab() */
@@ -211,7 +217,7 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, 
int shrinker_id)
 
 static DEFINE_IDR(shrinker_idr);
 
-static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+static int shrinker_memcg_alloc(struct shrinker *shrinker)
 {
int id, ret = -ENOMEM;
 
@@ -219,7 +225,6 @@ static int prealloc_memcg_shrinker(struct shrinker 
*shrinker)
return -ENOSYS;
 
	down_write(&shrinker_rwsem);
-   /* This may call shrinker, so it must use down_read_trylock() */
	id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
if (id < 0)
goto unlock;
@@ -237,7 +242,7 @@ static int prealloc_memcg_shrinker(struct shrinker 
*shrinker)
return ret;
 }
 
-static void unregister_memcg_shrinker(struct shrinker *shrinker)
+static void shrinker_memcg_remove(struct shrinker *shrinker)
 {
int id = shrinker->id;
 
@@ -253,10 +258,15 @@ static long xchg_nr_deferred_memcg(int nid, struct 
shrinker *shrinker,
 {
struct shrinker_info *info;
struct shrinker_info_unit *unit;
+   long nr_deferred;
 
-   info = shrinker_info_protected(memcg, nid);
+   rcu_read_lock();
+   info = shrinker_info_rcu(memcg, nid);
unit = info->unit[shriner_id_to_index(shrinker->id)];
-   

[Cluster-devel] [PATCH v4 45/48] mm: shrinker: make global slab shrink lockless

2023-08-07 Thread Qi Zheng
The shrinker_rwsem is a global read-write lock in the shrinker subsystem,
which protects most operations such as slab shrink, registration and
unregistration of shrinkers, etc. This can easily cause problems in the
following cases.

1) When the memory pressure is high and there are many filesystems
   mounted or unmounted at the same time, slab shrink will be affected
   (down_read_trylock() failed).

   Such as the real workload mentioned by Kirill Tkhai:

   ```
   One of the real workloads from my experience is start
   of an overcommitted node containing many starting
   containers after node crash (or many resuming containers
   after reboot for kernel update). In these cases memory
   pressure is huge, and the node goes round in long reclaim.
   ```

2) If a shrinker is blocked (such as the case mentioned
   in [1]) and a writer comes in (such as mount a fs),
   then this writer will be blocked and cause all
   subsequent shrinker-related operations to be blocked.

Even if there is no competitor when shrinking slab, there may still be a
problem. The down_read_trylock() may become a perf hotspot with frequent
calls to shrink_slab(). Because of the poor multicore scalability of
atomic operations, this can lead to a significant drop in IPC
(instructions per cycle).

We used to implement the lockless slab shrink with SRCU [2], but then
kernel test robot reported -88.8% regression in
stress-ng.ramfs.ops_per_sec test case [3], so we reverted it [4].

This commit uses the refcount+RCU method [5] proposed by Dave Chinner
to re-implement the lockless global slab shrink. The memcg slab shrink is
handled in the subsequent patch.

By now, all shrinker instances are dynamically allocated and will be freed
by call_rcu(). So we can use rcu_read_{lock,unlock}() to ensure that the
shrinker instance is valid.

And the shrinker instance will not be run again after unregistration. So
the structure that records the pointer to the shrinker instance can be
safely freed without waiting for the RCU read-side critical section.

In this way, while we implement the lockless slab shrink, we don't need to
be blocked in unregister_shrinker().

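The diff below is truncated right at shrinker_try_get(), so for clarity,
the reader-side pair implied by the refcount and completion fields added
to struct shrinker is roughly (a sketch based on the description above):

```
/* Sketch of the read-side helpers described above. */
static inline bool shrinker_try_get(struct shrinker *shrinker)
{
	/* Fails once shrinker_free() has dropped the initial reference. */
	return refcount_inc_not_zero(&shrinker->refcount);
}

static inline void shrinker_put(struct shrinker *shrinker)
{
	/* The last put wakes the waiter in shrinker_free(). */
	if (refcount_dec_and_test(&shrinker->refcount))
		complete(&shrinker->done);
}
```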
The following are the test results:

stress-ng --timeout 60 --times --verify --metrics-brief --ramfs 9 &

1) Before applying this patchset:

setting to a 60 second run per stressor
dispatching hogs: 9 ramfs
stressor       bogo ops real time  usr time  sys time   bogo ops/s   bogo ops/s
                          (secs)    (secs)    (secs)   (real time) (usr+sys time)
ramfs            735238     60.00     12.37    363.70     12253.05      1955.08
for a 60.01s run time:
   1440.27s available CPU time
 12.36s user time   (  0.86%)
363.70s system time ( 25.25%)
376.06s total time  ( 26.11%)
load average: 10.79 4.47 1.69
passed: 9: ramfs (9)
failed: 0
skipped: 0
successful run completed in 60.01s (1 min, 0.01 secs)

2) After applying this patchset:

setting to a 60 second run per stressor
dispatching hogs: 9 ramfs
stressor       bogo ops real time  usr time  sys time   bogo ops/s   bogo ops/s
                          (secs)    (secs)    (secs)   (real time) (usr+sys time)
ramfs            746698     60.00     12.45    376.16     12444.02      1921.47
for a 60.01s run time:
   1440.28s available CPU time
 12.44s user time   (  0.86%)
376.16s system time ( 26.12%)
388.60s total time  ( 26.98%)
load average: 9.01 3.85 1.49
passed: 9: ramfs (9)
failed: 0
skipped: 0
successful run completed in 60.01s (1 min, 0.01 secs)

We can see that the ops/s has hardly changed.

[1]. 
https://lore.kernel.org/lkml/20191129214541.3110-1-ptikhomi...@virtuozzo.com/
[2]. 
https://lore.kernel.org/lkml/20230313112819.38938-1-zhengqi.a...@bytedance.com/
[3]. https://lore.kernel.org/lkml/202305230837.db2c233f-yujie@intel.com/
[4]. https://lore.kernel.org/all/20230609081518.3039120-1-qi.zh...@linux.dev/
[5]. https://lore.kernel.org/lkml/zijhou1d55d4h...@dread.disaster.area/

Signed-off-by: Qi Zheng 
---
 include/linux/shrinker.h | 17 ++
 mm/shrinker.c| 70 +---
 2 files changed, 68 insertions(+), 19 deletions(-)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index eb342994675a..f06225f18531 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,6 +4,8 @@
 
 #include 
 #include 
+#include 
+#include 
 
 #define SHRINKER_UNIT_BITS BITS_PER_LONG
 
@@ -87,6 +89,10 @@ struct shrinker {
int seeks;  /* seeks to recreate an obj */
unsigned flags;
 
+   refcount_t refcount;
+   struct completion done;
+   struct rcu_head rcu;
+
void *private_data;
 
/* These are for internal use */
@@ -120,6 +126,17 @@ struct shrinker *shrinker_alloc(unsigned int flags, const 
char *fmt, ...);
 void shrinker_register(struct shrinker *shrinker);
 void shrinker_free(struct shrinker *shrinker);
 
+static inline bool shrinker_try_get(struct shrinker *shrinker)

[Cluster-devel] [PATCH v4 44/48] mm: shrinker: add a secondary array for shrinker_info::{map, nr_deferred}

2023-08-07 Thread Qi Zheng
Currently, we maintain two linear arrays per node per memcg, which are
shrinker_info::map and shrinker_info::nr_deferred. And we need to resize
them when the shrinker_nr_max is exceeded, that is, allocate a new array,
and then copy the old array to the new array, and finally free the old
array by RCU.

For shrinker_info::map, we do set_bit() under the RCU lock, so we may set
the value into the old map which is about to be freed. This may cause the
value set to be lost. The current solution is not to copy the old map when
resizing, but to set all the corresponding bits in the new map to 1. This
solves the data loss problem, but bring the overhead of more pointless
loops while doing memcg slab shrink.

For shrinker_info::nr_deferred, we will only modify it under the read lock
of shrinker_rwsem, so it will not run concurrently with the resizing. But
after we make memcg slab shrink lockless, there will be the same data loss
problem as shrinker_info::map, and we can't work around it like the map.

For such resizable arrays, the most straightforward idea is to switch to
an xarray, like we did for list_lru [1]. We would need to do xa_store() in
list_lru_add()-->set_shrinker_bit(), but that can cause memory allocation,
and list_lru_add() doesn't accept failure. A possible solution is to
pre-allocate, but it is hard to determine where the pre-allocation should
happen.

Therefore, this commit chooses to introduce a secondary array for
shrinker_info::{map, nr_deferred}, so that we only need to copy this
secondary array every time the size is resized. Then even if we read the
old secondary array under the RCU lock, the map and nr_deferred found
through it are still valid, so no update is lost.

[1]. 
https://lore.kernel.org/all/20220228122126.37293-13-songmuc...@bytedance.com/

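The id-to-slot mapping implied by the two-level layout is simple
arithmetic; a sketch (helper names as spelled in this series):

```
/* Sketch: locate the map bit / nr_deferred slot for a shrinker id. */
static inline int shriner_id_to_index(int shrinker_id)
{
	return shrinker_id / SHRINKER_UNIT_BITS;	/* which unit */
}

static inline int shriner_id_to_offset(int shrinker_id)
{
	return shrinker_id % SHRINKER_UNIT_BITS;	/* slot within unit */
}

/* e.g. reading the deferred count for a given shrinker id: */
unit = info->unit[shriner_id_to_index(id)];
nr = atomic_long_read(&unit->nr_deferred[shriner_id_to_offset(id)]);
```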
Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 include/linux/memcontrol.h |  12 +-
 include/linux/shrinker.h   |  17 +++
 mm/shrinker.c  | 250 +++--
 3 files changed, 172 insertions(+), 107 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 11810a2cfd2d..b49515bb6fbd 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
 #include 
 #include 
 #include 
+#include 
 
 struct mem_cgroup;
 struct obj_cgroup;
@@ -88,17 +89,6 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
 };
 
-/*
- * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
- * shrinkers, which have elements charged to this memcg.
- */
-struct shrinker_info {
-   struct rcu_head rcu;
-   atomic_long_t *nr_deferred;
-   unsigned long *map;
-   int map_nr_max;
-};
-
 struct lruvec_stats_percpu {
/* Local (CPU and cgroup) state */
long state[NR_VM_NODE_STAT_ITEMS];
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 025c8070dd86..eb342994675a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -5,6 +5,23 @@
 #include 
 #include 
 
+#define SHRINKER_UNIT_BITS BITS_PER_LONG
+
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to the memcg.
+ */
+struct shrinker_info_unit {
+   atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
+   DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
+};
+
+struct shrinker_info {
+   struct rcu_head rcu;
+   int map_nr_max;
+   struct shrinker_info_unit *unit[];
+};
+
 /*
  * This struct is used to pass information from page reclaim to the shrinkers.
  * We consolidate the values for easier extension later.
diff --git a/mm/shrinker.c b/mm/shrinker.c
index a27779ed3798..1911c06b8af5 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -12,15 +12,50 @@ DECLARE_RWSEM(shrinker_rwsem);
 #ifdef CONFIG_MEMCG
 static int shrinker_nr_max;
 
-/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
-static inline int shrinker_map_size(int nr_items)
+static inline int shrinker_unit_size(int nr_items)
 {
-   return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
+   return (DIV_ROUND_UP(nr_items, SHRINKER_UNIT_BITS) * sizeof(struct 
shrinker_info_unit *));
 }
 
-static inline int shrinker_defer_size(int nr_items)
+static inline void shrinker_unit_free(struct shrinker_info *info, int start)
 {
-   return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
+   struct shrinker_info_unit **unit;
+   int nr, i;
+
+   if (!info)
+   return;
+
+   unit = info->unit;
+   nr = DIV_ROUND_UP(info->map_nr_max, SHRINKER_UNIT_BITS);
+
+   for (i = start; i < nr; i++) {
+   if (!unit[i])
+   break;
+
+   kvfree(unit[i]);
+   unit[i] = NULL;
+   }
+}
+
+static inline int shrinker_unit_alloc(struct shrinker_info *new,
+  struct shrinker_info *old, int nid)
+{
+   struct shrinker_info_unit *unit;
+   int nr = 

[Cluster-devel] [PATCH v4 43/48] drm/ttm: introduce pool_shrink_rwsem

2023-08-07 Thread Qi Zheng
Currently, synchronize_shrinkers() is only used by the TTM pool. It only
requires that no shrinkers run in parallel.

After we use the RCU+refcount method to implement the lockless slab shrink,
we can no longer use shrinker_rwsem or synchronize_rcu() to guarantee that
all shrinker invocations have seen an update before freeing memory.

So we introduce a new pool_shrink_rwsem to implement a private
synchronize_shrinkers(), so as to achieve the same purpose.

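In effect the rwsem is used purely as a barrier: ttm_pool_shrink() takes it
for read, and the write-lock/unlock pair in the private
synchronize_shrinkers() drains all in-flight shrinks. The call site in
ttm_pool_fini() is not shown in the hunks below, so the following is an
assumption about the caller, not patch content:

```
/* Conceptual call site; the real ttm_pool_fini() body is elided here. */
void ttm_pool_fini(struct ttm_pool *pool)
{
	/* ... remove this pool's page types from shrinker_list ... */

	/* Wait for all running ttm_pool_shrink() calls, which hold
	 * pool_shrink_rwsem for read, before the pages go away. */
	synchronize_shrinkers();
}
```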
Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/gpu/drm/ttm/ttm_pool.c | 15 +++
 include/linux/shrinker.h   |  2 --
 mm/shrinker.c  | 15 ---
 3 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c9c9618c0dce..38b4c280725c 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -74,6 +74,7 @@ static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 
1];
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker *mm_shrinker;
+static DECLARE_RWSEM(pool_shrink_rwsem);
 
 /* Allocate pages of size 1 << order with the given gfp_flags */
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
@@ -317,6 +318,7 @@ static unsigned int ttm_pool_shrink(void)
unsigned int num_pages;
struct page *p;
 
+   down_read(&pool_shrink_rwsem);
	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
@@ -329,6 +331,7 @@ static unsigned int ttm_pool_shrink(void)
} else {
num_pages = 0;
}
+   up_read(&pool_shrink_rwsem);
 
return num_pages;
 }
@@ -572,6 +575,18 @@ void ttm_pool_init(struct ttm_pool *pool, struct device 
*dev,
 }
 EXPORT_SYMBOL(ttm_pool_init);
 
+/**
+ * synchronize_shrinkers - Wait for all running shrinkers to complete.
+ *
+ * This is useful to guarantee that all shrinker invocations have seen an
+ * update, before freeing memory, similar to rcu.
+ */
+static void synchronize_shrinkers(void)
+{
+   down_write(&pool_shrink_rwsem);
+   up_write(&pool_shrink_rwsem);
+}
+
 /**
  * ttm_pool_fini - Cleanup a pool
  *
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index c55c07c3f0cb..025c8070dd86 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -103,8 +103,6 @@ struct shrinker *shrinker_alloc(unsigned int flags, const 
char *fmt, ...);
 void shrinker_register(struct shrinker *shrinker);
 void shrinker_free(struct shrinker *shrinker);
 
-extern void synchronize_shrinkers(void);
-
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
  const char *fmt, ...);
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 3ab301ff122d..a27779ed3798 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -650,18 +650,3 @@ void shrinker_free(struct shrinker *shrinker)
kfree(shrinker);
 }
 EXPORT_SYMBOL_GPL(shrinker_free);
-
-/**
- * synchronize_shrinkers - Wait for all running shrinkers to complete.
- *
- * This is equivalent to calling unregister_shrink() and register_shrinker(),
- * but atomically and with less overhead. This is useful to guarantee that all
- * shrinker invocations have seen an update, before freeing memory, similar to
- * rcu.
- */
-void synchronize_shrinkers(void)
-{
-   down_write(&shrinker_rwsem);
-   up_write(&shrinker_rwsem);
-}
-EXPORT_SYMBOL(synchronize_shrinkers);
-- 
2.30.2



[Cluster-devel] [PATCH v4 42/48] mm: shrinker: remove old APIs

2023-08-07 Thread Qi Zheng
Now that no one uses the old APIs, just remove them.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 include/linux/shrinker.h |   7 --
 mm/shrinker.c| 143 ---
 2 files changed, 150 deletions(-)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index cc23ff0aee20..c55c07c3f0cb 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -103,13 +103,6 @@ struct shrinker *shrinker_alloc(unsigned int flags, const 
char *fmt, ...);
 void shrinker_register(struct shrinker *shrinker);
 void shrinker_free(struct shrinker *shrinker);
 
-extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
-   const char *fmt, ...);
-extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
-   const char *fmt, ...);
-extern void unregister_shrinker(struct shrinker *shrinker);
-extern void free_prealloced_shrinker(struct shrinker *shrinker);
 extern void synchronize_shrinkers(void);
 
 #ifdef CONFIG_SHRINKER_DEBUG
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 43a375f954f3..3ab301ff122d 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -651,149 +651,6 @@ void shrinker_free(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL_GPL(shrinker_free);
 
-/*
- * Add a shrinker callback to be called from the vm.
- */
-static int __prealloc_shrinker(struct shrinker *shrinker)
-{
-   unsigned int size;
-   int err;
-
-   if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
-   err = prealloc_memcg_shrinker(shrinker);
-   if (err != -ENOSYS)
-   return err;
-
-   shrinker->flags &= ~SHRINKER_MEMCG_AWARE;
-   }
-
-   size = sizeof(*shrinker->nr_deferred);
-   if (shrinker->flags & SHRINKER_NUMA_AWARE)
-   size *= nr_node_ids;
-
-   shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
-   if (!shrinker->nr_deferred)
-   return -ENOMEM;
-
-   return 0;
-}
-
-#ifdef CONFIG_SHRINKER_DEBUG
-int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
-{
-   va_list ap;
-   int err;
-
-   va_start(ap, fmt);
-   shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
-   va_end(ap);
-   if (!shrinker->name)
-   return -ENOMEM;
-
-   err = __prealloc_shrinker(shrinker);
-   if (err) {
-   kfree_const(shrinker->name);
-   shrinker->name = NULL;
-   }
-
-   return err;
-}
-#else
-int prealloc_shrinker(struct shrinker *shrinker, const char *fmt, ...)
-{
-   return __prealloc_shrinker(shrinker);
-}
-#endif
-
-void free_prealloced_shrinker(struct shrinker *shrinker)
-{
-#ifdef CONFIG_SHRINKER_DEBUG
-   kfree_const(shrinker->name);
-   shrinker->name = NULL;
-#endif
-   if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
-   down_write(&shrinker_rwsem);
-   unregister_memcg_shrinker(shrinker);
-   up_write(&shrinker_rwsem);
-   return;
-   }
-
-   kfree(shrinker->nr_deferred);
-   shrinker->nr_deferred = NULL;
-}
-
-void register_shrinker_prepared(struct shrinker *shrinker)
-{
-   down_write(&shrinker_rwsem);
-   list_add_tail(&shrinker->list, &shrinker_list);
-   shrinker->flags |= SHRINKER_REGISTERED;
-   shrinker_debugfs_add(shrinker);
-   up_write(&shrinker_rwsem);
-}
-
-static int __register_shrinker(struct shrinker *shrinker)
-{
-   int err = __prealloc_shrinker(shrinker);
-
-   if (err)
-   return err;
-   register_shrinker_prepared(shrinker);
-   return 0;
-}
-
-#ifdef CONFIG_SHRINKER_DEBUG
-int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
-{
-   va_list ap;
-   int err;
-
-   va_start(ap, fmt);
-   shrinker->name = kvasprintf_const(GFP_KERNEL, fmt, ap);
-   va_end(ap);
-   if (!shrinker->name)
-   return -ENOMEM;
-
-   err = __register_shrinker(shrinker);
-   if (err) {
-   kfree_const(shrinker->name);
-   shrinker->name = NULL;
-   }
-   return err;
-}
-#else
-int register_shrinker(struct shrinker *shrinker, const char *fmt, ...)
-{
-   return __register_shrinker(shrinker);
-}
-#endif
-EXPORT_SYMBOL(register_shrinker);
-
-/*
- * Remove one
- */
-void unregister_shrinker(struct shrinker *shrinker)
-{
-   struct dentry *debugfs_entry;
-   int debugfs_id;
-
-   if (!(shrinker->flags & SHRINKER_REGISTERED))
-   return;
-
-   down_write(&shrinker_rwsem);
-   list_del(&shrinker->list);
-   shrinker->flags &= ~SHRINKER_REGISTERED;
-   if (shrinker->flags & SHRINKER_MEMCG_AWARE)
-   unregister_memcg_shrinker(shrinker);
-   debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
-   up_write(&shrinker_rwsem);
-
-   shrinker_debugfs_remove(debugfs_entry, debugfs_id);
-
-   kfree(shrinker->nr_deferred);
-   

[Cluster-devel] [PATCH v4 40/48] zsmalloc: dynamically allocate the mm-zspool shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the mm-zspool shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct zs_pool.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 mm/zsmalloc.c | 28 
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b58f957429f0..1909234bb345 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -229,7 +229,7 @@ struct zs_pool {
struct zs_pool_stats stats;
 
/* Compact classes */
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
 
 #ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
@@ -2086,8 +2086,7 @@ static unsigned long zs_shrinker_scan(struct shrinker 
*shrinker,
struct shrink_control *sc)
 {
unsigned long pages_freed;
-   struct zs_pool *pool = container_of(shrinker, struct zs_pool,
-   shrinker);
+   struct zs_pool *pool = shrinker->private_data;
 
/*
 * Compact classes and calculate compaction delta.
@@ -2105,8 +2104,7 @@ static unsigned long zs_shrinker_count(struct shrinker 
*shrinker,
int i;
struct size_class *class;
unsigned long pages_to_free = 0;
-   struct zs_pool *pool = container_of(shrinker, struct zs_pool,
-   shrinker);
+   struct zs_pool *pool = shrinker->private_data;
 
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i];
@@ -2121,18 +2119,24 @@ static unsigned long zs_shrinker_count(struct shrinker 
*shrinker,
 
 static void zs_unregister_shrinker(struct zs_pool *pool)
 {
-   unregister_shrinker(&pool->shrinker);
+   shrinker_free(pool->shrinker);
 }
 
 static int zs_register_shrinker(struct zs_pool *pool)
 {
-   pool->shrinker.scan_objects = zs_shrinker_scan;
-   pool->shrinker.count_objects = zs_shrinker_count;
-   pool->shrinker.batch = 0;
-   pool->shrinker.seeks = DEFAULT_SEEKS;
+   pool->shrinker = shrinker_alloc(0, "mm-zspool:%s", pool->name);
+   if (!pool->shrinker)
+   return -ENOMEM;
+
+   pool->shrinker->scan_objects = zs_shrinker_scan;
+   pool->shrinker->count_objects = zs_shrinker_count;
+   pool->shrinker->batch = 0;
+   pool->shrinker->seeks = DEFAULT_SEEKS;
+   pool->shrinker->private_data = pool;
 
-   return register_shrinker(&pool->shrinker, "mm-zspool:%s",
-pool->name);
+   shrinker_register(pool->shrinker);
+
+   return 0;
 }
 
 static int calculate_zspage_chain_size(int class_size)
-- 
2.30.2



[Cluster-devel] [PATCH v4 39/48] xfs: dynamically allocate the xfs-qm shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the xfs-qm shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct xfs_quotainfo.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/xfs/xfs_qm.c | 28 +++-
 fs/xfs/xfs_qm.h |  2 +-
 2 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6abcc34fafd8..15ba23f11271 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -504,8 +504,7 @@ xfs_qm_shrink_scan(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_quotainfo*qi = container_of(shrink,
-   struct xfs_quotainfo, qi_shrinker);
+   struct xfs_quotainfo*qi = shrink->private_data;
struct xfs_qm_isolate   isol;
unsigned long   freed;
int error;
@@ -539,8 +538,7 @@ xfs_qm_shrink_count(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_quotainfo*qi = container_of(shrink,
-   struct xfs_quotainfo, qi_shrinker);
+   struct xfs_quotainfo*qi = shrink->private_data;
 
	return list_lru_shrink_count(&qi->qi_lru, sc);
 }
@@ -680,15 +678,19 @@ xfs_qm_init_quotainfo(
if (XFS_IS_PQUOTA_ON(mp))
xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf);
 
-   qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
-   qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
-   qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
-   qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
-
-   error = register_shrinker(&qinf->qi_shrinker, "xfs-qm:%s",
- mp->m_super->s_id);
-   if (error)
+   qinf->qi_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-qm:%s",
+  mp->m_super->s_id);
+   if (!qinf->qi_shrinker) {
+   error = -ENOMEM;
goto out_free_inos;
+   }
+
+   qinf->qi_shrinker->count_objects = xfs_qm_shrink_count;
+   qinf->qi_shrinker->scan_objects = xfs_qm_shrink_scan;
+   qinf->qi_shrinker->seeks = DEFAULT_SEEKS;
+   qinf->qi_shrinker->private_data = qinf;
+
+   shrinker_register(qinf->qi_shrinker);
 
return 0;
 
@@ -718,7 +720,7 @@ xfs_qm_destroy_quotainfo(
qi = mp->m_quotainfo;
ASSERT(qi != NULL);
 
-   unregister_shrinker(&qi->qi_shrinker);
+   shrinker_free(qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	xfs_qm_destroy_quotainos(qi);
	mutex_destroy(&qi->qi_tree_lock);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 9683f0457d19..d5c9fc4ba591 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -63,7 +63,7 @@ struct xfs_quotainfo {
	struct xfs_def_quota	qi_usr_default;
	struct xfs_def_quota	qi_grp_default;
	struct xfs_def_quota	qi_prj_default;
-   struct shrinker qi_shrinker;
+   struct shrinker *qi_shrinker;
 
/* Minimum and maximum quota expiration timestamp values. */
	time64_t		qi_expiry_min;
-- 
2.30.2



[Cluster-devel] [PATCH v4 38/48] xfs: dynamically allocate the xfs-inodegc shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the xfs-inodegc shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct xfs_mount.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/xfs/xfs_icache.c | 26 +++---
 fs/xfs/xfs_mount.c  |  4 ++--
 fs/xfs/xfs_mount.h  |  2 +-
 3 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 453890942d9f..751c380afd5a 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -2225,8 +2225,7 @@ xfs_inodegc_shrinker_count(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_mount*mp = container_of(shrink, struct xfs_mount,
-  m_inodegc_shrinker);
+   struct xfs_mount*mp = shrink->private_data;
struct xfs_inodegc  *gc;
int cpu;
 
@@ -2247,8 +2246,7 @@ xfs_inodegc_shrinker_scan(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_mount*mp = container_of(shrink, struct xfs_mount,
-  m_inodegc_shrinker);
+   struct xfs_mount*mp = shrink->private_data;
struct xfs_inodegc  *gc;
int cpu;
	bool			no_items = true;
@@ -2284,13 +2282,19 @@ int
 xfs_inodegc_register_shrinker(
struct xfs_mount*mp)
 {
-   struct shrinker *shrink = &mp->m_inodegc_shrinker;
+   mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB,
+   "xfs-inodegc:%s",
+   mp->m_super->s_id);
+   if (!mp->m_inodegc_shrinker)
+   return -ENOMEM;
+
+   mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count;
+   mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan;
+   mp->m_inodegc_shrinker->seeks = 0;
+   mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH;
+   mp->m_inodegc_shrinker->private_data = mp;
 
-   shrink->count_objects = xfs_inodegc_shrinker_count;
-   shrink->scan_objects = xfs_inodegc_shrinker_scan;
-   shrink->seeks = 0;
-   shrink->flags = SHRINKER_NONSLAB;
-   shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
+   shrinker_register(mp->m_inodegc_shrinker);
 
-   return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
+   return 0;
 }
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index fb87ffb48f7f..640d09891a4e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1018,7 +1018,7 @@ xfs_mountfs(
  out_log_dealloc:
xfs_log_mount_cancel(mp);
  out_inodegc_shrinker:
-   unregister_shrinker(&mp->m_inodegc_shrinker);
+   shrinker_free(mp->m_inodegc_shrinker);
  out_fail_wait:
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
xfs_buftarg_drain(mp->m_logdev_targp);
@@ -1100,7 +1100,7 @@ xfs_unmountfs(
 #if defined(DEBUG)
xfs_errortag_clearall(mp);
 #endif
-   unregister_shrinker(&mp->m_inodegc_shrinker);
+   shrinker_free(mp->m_inodegc_shrinker);
xfs_free_perag(mp);
 
xfs_errortag_del(mp);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index e2866e7fa60c..562c294ca08e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -217,7 +217,7 @@ typedef struct xfs_mount {
	atomic_t		m_agirotor;	/* last ag dir inode alloced */
 
/* Memory shrinker to throttle and reprioritize inodegc */
-   struct shrinker m_inodegc_shrinker;
+   struct shrinker *m_inodegc_shrinker;
/*
 * Workqueue item so that we can coalesce multiple inode flush attempts
 * into a single flush.
-- 
2.30.2



[Cluster-devel] [PATCH v4 37/48] xfs: dynamically allocate the xfs-buf shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the xfs-buf shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct xfs_buftarg.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/xfs/xfs_buf.c | 25 ++---
 fs/xfs/xfs_buf.h |  2 +-
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15d1e5a7c2d3..715730fc91cb 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1906,8 +1906,7 @@ xfs_buftarg_shrink_scan(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_buftarg  *btp = container_of(shrink,
-   struct xfs_buftarg, bt_shrinker);
+   struct xfs_buftarg  *btp = shrink->private_data;
LIST_HEAD(dispose);
unsigned long   freed;
 
@@ -1929,8 +1928,7 @@ xfs_buftarg_shrink_count(
struct shrinker *shrink,
struct shrink_control   *sc)
 {
-   struct xfs_buftarg  *btp = container_of(shrink,
-   struct xfs_buftarg, bt_shrinker);
+   struct xfs_buftarg  *btp = shrink->private_data;
	return list_lru_shrink_count(&btp->bt_lru, sc);
 }
 
@@ -1938,7 +1936,7 @@ void
 xfs_free_buftarg(
struct xfs_buftarg  *btp)
 {
-   unregister_shrinker(&btp->bt_shrinker);
+   shrinker_free(btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);
@@ -2021,13 +2019,18 @@ xfs_alloc_buftarg(
	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
goto error_lru;
 
-   btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
-   btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
-   btp->bt_shrinker.seeks = DEFAULT_SEEKS;
-   btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
-   if (register_shrinker(&btp->bt_shrinker, "xfs-buf:%s",
- mp->m_super->s_id))
+   btp->bt_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s",
+ mp->m_super->s_id);
+   if (!btp->bt_shrinker)
goto error_pcpu;
+
+   btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
+   btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
+   btp->bt_shrinker->seeks = DEFAULT_SEEKS;
+   btp->bt_shrinker->private_data = btp;
+
+   shrinker_register(btp->bt_shrinker);
+
return btp;
 
 error_pcpu:
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 549c60942208..4e6969a675f7 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -102,7 +102,7 @@ typedef struct xfs_buftarg {
size_t  bt_logical_sectormask;
 
/* LRU control structures */
-   struct shrinker bt_shrinker;
+   struct shrinker *bt_shrinker;
struct list_lru bt_lru;
 
struct percpu_counter   bt_io_count;
-- 
2.30.2



[Cluster-devel] [PATCH v4 36/48] nfsd: dynamically allocate the nfsd-reply shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the nfsd-reply shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct nfsd_net.

Signed-off-by: Qi Zheng 
Acked-by: Chuck Lever 
Acked-by: Jeff Layton 
---
 fs/nfsd/netns.h|  2 +-
 fs/nfsd/nfscache.c | 31 ---
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index f669444d5336..ab303a8b77d5 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -177,7 +177,7 @@ struct nfsd_net {
/* size of cache when we saw the longest hash chain */
unsigned int longest_chain_cachesize;
 
-   struct shrinker nfsd_reply_cache_shrinker;
+   struct shrinker *nfsd_reply_cache_shrinker;
 
/* tracking server-to-server copy mounts */
spinlock_t  nfsd_ssc_lock;
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 80621a709510..fd56a52aa5fb 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -201,26 +201,29 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
 {
unsigned int hashsize;
unsigned int i;
-   int status = 0;
 
nn->max_drc_entries = nfsd_cache_size_limit();
	atomic_set(&nn->num_drc_entries, 0);
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);
 
-   nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
-   nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
-   nn->nfsd_reply_cache_shrinker.seeks = 1;
-   status = register_shrinker(&nn->nfsd_reply_cache_shrinker,
-  "nfsd-reply:%s", nn->nfsd_name);
-   if (status)
-   return status;
-
nn->drc_hashtbl = kvzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
if (!nn->drc_hashtbl)
+   return -ENOMEM;
+
+   nn->nfsd_reply_cache_shrinker = shrinker_alloc(0, "nfsd-reply:%s",
+  nn->nfsd_name);
+   if (!nn->nfsd_reply_cache_shrinker)
goto out_shrinker;
 
+   nn->nfsd_reply_cache_shrinker->scan_objects = nfsd_reply_cache_scan;
+   nn->nfsd_reply_cache_shrinker->count_objects = nfsd_reply_cache_count;
+   nn->nfsd_reply_cache_shrinker->seeks = 1;
+   nn->nfsd_reply_cache_shrinker->private_data = nn;
+
+   shrinker_register(nn->nfsd_reply_cache_shrinker);
+
for (i = 0; i < hashsize; i++) {
	INIT_LIST_HEAD(&nn->drc_hashtbl[i].lru_head);
	spin_lock_init(&nn->drc_hashtbl[i].cache_lock);
@@ -229,7 +232,7 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
 
return 0;
 out_shrinker:
-   unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+   kvfree(nn->drc_hashtbl);
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
return -ENOMEM;
 }
@@ -239,7 +242,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
struct nfsd_cacherep *rp;
unsigned int i;
 
-   unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
+   shrinker_free(nn->nfsd_reply_cache_shrinker);
 
for (i = 0; i < nn->drc_hashsize; i++) {
	struct list_head *head = &nn->drc_hashtbl[i].lru_head;
@@ -323,8 +326,7 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct 
nfsd_drc_bucket *b,
 static unsigned long
 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-   struct nfsd_net *nn = container_of(shrink,
-   struct nfsd_net, nfsd_reply_cache_shrinker);
+   struct nfsd_net *nn = shrink->private_data;
 
	return atomic_read(&nn->num_drc_entries);
 }
@@ -343,8 +345,7 @@ nfsd_reply_cache_count(struct shrinker *shrink, struct 
shrink_control *sc)
 static unsigned long
 nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-   struct nfsd_net *nn = container_of(shrink,
-   struct nfsd_net, nfsd_reply_cache_shrinker);
+   struct nfsd_net *nn = shrink->private_data;
unsigned long freed = 0;
LIST_HEAD(dispose);
unsigned int i;
-- 
2.30.2



[Cluster-devel] [PATCH v4 35/48] nfsd: dynamically allocate the nfsd-client shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the nfsd-client shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct nfsd_net.

Signed-off-by: Qi Zheng 
Acked-by: Chuck Lever 
Acked-by: Jeff Layton 
---
 fs/nfsd/netns.h |  2 +-
 fs/nfsd/nfs4state.c | 20 
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index ec49b200b797..f669444d5336 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -195,7 +195,7 @@ struct nfsd_net {
int nfs4_max_clients;
 
atomic_tnfsd_courtesy_clients;
-   struct shrinker nfsd_client_shrinker;
+   struct shrinker *nfsd_client_shrinker;
struct work_struct  nfsd_shrinker_work;
 };
 
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8534693eb6a4..9e401db8a6d2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4400,8 +4400,7 @@ static unsigned long
 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
 {
int count;
-   struct nfsd_net *nn = container_of(shrink,
-   struct nfsd_net, nfsd_client_shrinker);
+   struct nfsd_net *nn = shrink->private_data;
 
	count = atomic_read(&nn->nfsd_courtesy_clients);
if (!count)
@@ -8149,12 +8148,17 @@ static int nfs4_state_create_net(struct net *net)
	INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
get_net(net);
 
-   nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
-   nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
-   nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
-
-   if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
+   nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
+   if (!nn->nfsd_client_shrinker)
goto err_shrinker;
+
+   nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
+   nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
+   nn->nfsd_client_shrinker->seeks = DEFAULT_SEEKS;
+   nn->nfsd_client_shrinker->private_data = nn;
+
+   shrinker_register(nn->nfsd_client_shrinker);
+
return 0;
 
 err_shrinker:
@@ -8252,7 +8256,7 @@ nfs4_state_shutdown_net(struct net *net)
struct list_head *pos, *next, reaplist;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-   unregister_shrinker(&nn->nfsd_client_shrinker);
+   shrinker_free(nn->nfsd_client_shrinker);
	cancel_work(&nn->nfsd_shrinker_work);
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);
-- 
2.30.2



[Cluster-devel] [PATCH v4 34/48] jbd2, ext4: dynamically allocate the jbd2-journal shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the jbd2-journal shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct journal_s.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/jbd2/journal.c| 27 +--
 include/linux/jbd2.h |  2 +-
 2 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1b5a45ab62b0..4c421da03fee 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1298,7 +1298,7 @@ static int jbd2_min_tag_size(void)
 static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
  struct shrink_control *sc)
 {
-   journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+   journal_t *journal = shrink->private_data;
unsigned long nr_to_scan = sc->nr_to_scan;
unsigned long nr_shrunk;
unsigned long count;
@@ -1324,7 +1324,7 @@ static unsigned long jbd2_journal_shrink_scan(struct 
shrinker *shrink,
 static unsigned long jbd2_journal_shrink_count(struct shrinker *shrink,
   struct shrink_control *sc)
 {
-   journal_t *journal = container_of(shrink, journal_t, j_shrinker);
+   journal_t *journal = shrink->private_data;
unsigned long count;
 
	count = percpu_counter_read_positive(&journal->j_checkpoint_jh_count);
@@ -1412,19 +1412,26 @@ static journal_t *journal_init_common(struct 
block_device *bdev,
journal->j_superblock = (journal_superblock_t *)bh->b_data;
 
journal->j_shrink_transaction = NULL;
-   journal->j_shrinker.scan_objects = jbd2_journal_shrink_scan;
-   journal->j_shrinker.count_objects = jbd2_journal_shrink_count;
-   journal->j_shrinker.seeks = DEFAULT_SEEKS;
-   journal->j_shrinker.batch = journal->j_max_transaction_buffers;
 
	if (percpu_counter_init(&journal->j_checkpoint_jh_count, 0, GFP_KERNEL))
goto err_cleanup;
 
-   if (register_shrinker(&journal->j_shrinker, "jbd2-journal:(%u:%u)",
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev))) {
+   journal->j_shrinker = shrinker_alloc(0, "jbd2-journal:(%u:%u)",
+MAJOR(bdev->bd_dev),
+MINOR(bdev->bd_dev));
+   if (!journal->j_shrinker) {
	percpu_counter_destroy(&journal->j_checkpoint_jh_count);
goto err_cleanup;
}
+
+   journal->j_shrinker->scan_objects = jbd2_journal_shrink_scan;
+   journal->j_shrinker->count_objects = jbd2_journal_shrink_count;
+   journal->j_shrinker->seeks = DEFAULT_SEEKS;
+   journal->j_shrinker->batch = journal->j_max_transaction_buffers;
+   journal->j_shrinker->private_data = journal;
+
+   shrinker_register(journal->j_shrinker);
+
return journal;
 
 err_cleanup:
@@ -2187,9 +2194,9 @@ int jbd2_journal_destroy(journal_t *journal)
brelse(journal->j_sb_buffer);
}
 
-   if (journal->j_shrinker.flags & SHRINKER_REGISTERED) {
+   if (journal->j_shrinker) {
	percpu_counter_destroy(&journal->j_checkpoint_jh_count);
-   unregister_shrinker(&journal->j_shrinker);
+   shrinker_free(journal->j_shrinker);
}
if (journal->j_proc_entry)
jbd2_stats_proc_exit(journal);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 44c298aa58d4..beb4c4586320 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -891,7 +891,7 @@ struct journal_s
 * Journal head shrinker, reclaim buffer's journal head which
 * has been written back.
 */
-   struct shrinker j_shrinker;
+   struct shrinker *j_shrinker;
 
/**
 * @j_checkpoint_jh_count:
-- 
2.30.2



[Cluster-devel] [PATCH v4 33/48] ext4: dynamically allocate the ext4-es shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the ext4-es shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then we don't need to wait for the RCU
read-side critical section when releasing the struct ext4_sb_info.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/ext4/ext4.h   |  2 +-
 fs/ext4/extents_status.c | 24 ++--
 2 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 481491e892df..48baf03eb1a6 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1657,7 +1657,7 @@ struct ext4_sb_info {
__u32 s_csum_seed;
 
/* Reclaim extents from extent status tree */
-   struct shrinker s_es_shrinker;
+   struct shrinker *s_es_shrinker;
struct list_head s_es_list; /* List of inodes with reclaimable 
extents */
long s_es_nr_inode;
struct ext4_es_stats s_es_stats;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 9b5b8951afb4..0532a81a7669 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -1596,7 +1596,7 @@ static unsigned long ext4_es_count(struct shrinker 
*shrink,
unsigned long nr;
struct ext4_sb_info *sbi;
 
-   sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+   sbi = shrink->private_data;
	nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
return nr;
@@ -1605,8 +1605,7 @@ static unsigned long ext4_es_count(struct shrinker 
*shrink,
 static unsigned long ext4_es_scan(struct shrinker *shrink,
  struct shrink_control *sc)
 {
-   struct ext4_sb_info *sbi = container_of(shrink,
-   struct ext4_sb_info, s_es_shrinker);
+   struct ext4_sb_info *sbi = shrink->private_data;
int nr_to_scan = sc->nr_to_scan;
int ret, nr_shrunk;
 
@@ -1690,13 +1689,18 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
if (err)
goto err3;
 
-   sbi->s_es_shrinker.scan_objects = ext4_es_scan;
-   sbi->s_es_shrinker.count_objects = ext4_es_count;
-   sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
-   err = register_shrinker(&sbi->s_es_shrinker, "ext4-es:%s",
-   sbi->s_sb->s_id);
-   if (err)
+   sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id);
+   if (!sbi->s_es_shrinker) {
+   err = -ENOMEM;
goto err4;
+   }
+
+   sbi->s_es_shrinker->scan_objects = ext4_es_scan;
+   sbi->s_es_shrinker->count_objects = ext4_es_count;
+   sbi->s_es_shrinker->seeks = DEFAULT_SEEKS;
+   sbi->s_es_shrinker->private_data = sbi;
+
+   shrinker_register(sbi->s_es_shrinker);
 
return 0;
 err4:
@@ -1716,7 +1720,7 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
	percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
-   unregister_shrinker(&sbi->s_es_shrinker);
+   shrinker_free(sbi->s_es_shrinker);
 }
 
 /*
-- 
2.30.2



[Cluster-devel] [PATCH v4 32/48] mbcache: dynamically allocate the mbcache shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the mbcache shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct mb_cache.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/mbcache.c | 23 +--
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index 2a4b8b549e93..0d1e24e9a5e3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -37,7 +37,7 @@ struct mb_cache {
struct list_headc_list;
/* Number of entries in cache */
unsigned long   c_entry_count;
-   struct shrinker c_shrink;
+   struct shrinker *c_shrink;
/* Work for shrinking when the cache has too many entries */
struct work_struct  c_shrink_work;
 };
@@ -293,8 +293,7 @@ EXPORT_SYMBOL(mb_cache_entry_touch);
 static unsigned long mb_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
 {
-   struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+   struct mb_cache *cache = shrink->private_data;
 
return cache->c_entry_count;
 }
@@ -333,8 +332,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
 static unsigned long mb_cache_scan(struct shrinker *shrink,
   struct shrink_control *sc)
 {
-   struct mb_cache *cache = container_of(shrink, struct mb_cache,
- c_shrink);
+   struct mb_cache *cache = shrink->private_data;
return mb_cache_shrink(cache, sc->nr_to_scan);
 }
 
@@ -377,15 +375,20 @@ struct mb_cache *mb_cache_create(int bucket_bits)
for (i = 0; i < bucket_count; i++)
INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
 
-   cache->c_shrink.count_objects = mb_cache_count;
-   cache->c_shrink.scan_objects = mb_cache_scan;
-   cache->c_shrink.seeks = DEFAULT_SEEKS;
-   if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
+   cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
+   if (!cache->c_shrink) {
kfree(cache->c_hash);
kfree(cache);
goto err_out;
}
 
+   cache->c_shrink->count_objects = mb_cache_count;
+   cache->c_shrink->scan_objects = mb_cache_scan;
+   cache->c_shrink->seeks = DEFAULT_SEEKS;
+   cache->c_shrink->private_data = cache;
+
+   shrinker_register(cache->c_shrink);
+
INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
 
return cache;
@@ -406,7 +409,7 @@ void mb_cache_destroy(struct mb_cache *cache)
 {
struct mb_cache_entry *entry, *next;
 
-   unregister_shrinker(&cache->c_shrink);
+   shrinker_free(cache->c_shrink);
 
/*
 * We don't bother with any locking. Cache must not be used at this
-- 
2.30.2



[Cluster-devel] [PATCH v4 31/48] virtio_balloon: dynamically allocate the virtio-balloon shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the virtio-balloon shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct virtio_balloon.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/virtio/virtio_balloon.c | 25 +++--
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 5b15936a5214..82e6087073a9 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -111,7 +111,7 @@ struct virtio_balloon {
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
 
/* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
 
/* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
struct notifier_block oom_nb;
@@ -816,8 +816,7 @@ static unsigned long shrink_free_pages(struct 
virtio_balloon *vb,
 static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
  struct shrink_control *sc)
 {
-   struct virtio_balloon *vb = container_of(shrinker,
-   struct virtio_balloon, shrinker);
+   struct virtio_balloon *vb = shrinker->private_data;
 
return shrink_free_pages(vb, sc->nr_to_scan);
 }
@@ -825,8 +824,7 @@ static unsigned long virtio_balloon_shrinker_scan(struct 
shrinker *shrinker,
 static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
   struct shrink_control *sc)
 {
-   struct virtio_balloon *vb = container_of(shrinker,
-   struct virtio_balloon, shrinker);
+   struct virtio_balloon *vb = shrinker->private_data;
 
return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
 }
@@ -847,16 +845,23 @@ static int virtio_balloon_oom_notify(struct 
notifier_block *nb,
 
 static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
 {
-   unregister_shrinker(&vb->shrinker);
+   shrinker_free(vb->shrinker);
 }
 
 static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
 {
-   vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
-   vb->shrinker.count_objects = virtio_balloon_shrinker_count;
-   vb->shrinker.seeks = DEFAULT_SEEKS;
+   vb->shrinker = shrinker_alloc(0, "virtio-balloon");
+   if (!vb->shrinker)
+   return -ENOMEM;
 
-   return register_shrinker(&vb->shrinker, "virtio-balloon");
+   vb->shrinker->scan_objects = virtio_balloon_shrinker_scan;
+   vb->shrinker->count_objects = virtio_balloon_shrinker_count;
+   vb->shrinker->seeks = DEFAULT_SEEKS;
+   vb->shrinker->private_data = vb;
+
+   shrinker_register(vb->shrinker);
+
+   return 0;
 }
 
 static int virtballoon_probe(struct virtio_device *vdev)
-- 
2.30.2



[Cluster-devel] [PATCH v4 30/48] vmw_balloon: dynamically allocate the vmw-balloon shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the vmw-balloon shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct vmballoon.

And we can simply exit vmballoon_init() when registering the shrinker
fails, so the shrinker_registered indication is redundant; just remove it.
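
The flag removal leans on the new API's teardown semantics: shrinker_free()
is assumed here to be safe on a shrinker that was never registered,
including a NULL pointer (an assumption implied by this patch rather than
stated in it). Schematically:

    /* Before: teardown had to remember whether registration happened. */
    if (b->shrinker_registered)
            unregister_shrinker(&b->shrinker);
    b->shrinker_registered = false;

    /* After: one unconditional call; NULL is assumed to be a no-op. */
    shrinker_free(b->shrinker);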

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/misc/vmw_balloon.c | 38 --
 1 file changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 9ce9b9e0e9b6..ac2cdb6cdf74 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -380,16 +380,7 @@ struct vmballoon {
/**
 * @shrinker: shrinker interface that is used to avoid over-inflation.
 */
-   struct shrinker shrinker;
-
-   /**
-* @shrinker_registered: whether the shrinker was registered.
-*
-* The shrinker interface does not handle gracefully the removal of
-* shrinker that was not registered before. This indication allows to
-* simplify the unregistration process.
-*/
-   bool shrinker_registered;
+   struct shrinker *shrinker;
 };
 
 static struct vmballoon balloon;
@@ -1568,29 +1559,27 @@ static unsigned long vmballoon_shrinker_count(struct 
shrinker *shrinker,
 
 static void vmballoon_unregister_shrinker(struct vmballoon *b)
 {
-   if (b->shrinker_registered)
-   unregister_shrinker(&b->shrinker);
-   b->shrinker_registered = false;
+   shrinker_free(b->shrinker);
 }
 
 static int vmballoon_register_shrinker(struct vmballoon *b)
 {
-   int r;
-
/* Do nothing if the shrinker is not enabled */
if (!vmwballoon_shrinker_enable)
return 0;
 
-   b->shrinker.scan_objects = vmballoon_shrinker_scan;
-   b->shrinker.count_objects = vmballoon_shrinker_count;
-   b->shrinker.seeks = DEFAULT_SEEKS;
+   b->shrinker = shrinker_alloc(0, "vmw-balloon");
+   if (!b->shrinker)
+   return -ENOMEM;
 
-   r = register_shrinker(&b->shrinker, "vmw-balloon");
+   b->shrinker->scan_objects = vmballoon_shrinker_scan;
+   b->shrinker->count_objects = vmballoon_shrinker_count;
+   b->shrinker->seeks = DEFAULT_SEEKS;
+   b->shrinker->private_data = b;
 
-   if (r == 0)
-   b->shrinker_registered = true;
+   shrinker_register(b->shrinker);
 
-   return r;
+   return 0;
 }
 
 /*
@@ -1883,7 +1872,7 @@ static int __init vmballoon_init(void)
 
error = vmballoon_register_shrinker();
if (error)
-   goto fail;
+   return error;
 
/*
 * Initialization of compaction must be done after the call to
@@ -1905,9 +1894,6 @@ static int __init vmballoon_init(void)
vmballoon_debugfs_init();
 
return 0;
-fail:
-   vmballoon_unregister_shrinker();
-   return error;
 }
 
 /*
-- 
2.30.2



[Cluster-devel] [PATCH v4 29/48] bcache: dynamically allocate the md-bcache shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the md-bcache shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct cache_set.
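
A side effect of the pointer representation shows up in the hunks below:
c->shrink now doubles as the "was a shrinker set up" flag, since bcache
deliberately keeps the cache usable when shrinker allocation fails. Any
code that touches the shrinker directly therefore has to NULL-check it:

    /* Teardown: only free what was actually allocated. */
    if (c->shrink)
            shrinker_free(c->shrink);

    /* Direct invocation from sysfs must tolerate a missing shrinker. */
    if (c->shrink)
            c->shrink->scan_objects(c->shrink, &sc);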

Signed-off-by: Qi Zheng 
---
 drivers/md/bcache/bcache.h |  2 +-
 drivers/md/bcache/btree.c  | 27 ---
 drivers/md/bcache/sysfs.c  |  3 ++-
 3 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5a79bb3c272f..c622bc50f81b 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -541,7 +541,7 @@ struct cache_set {
struct bio_set  bio_split;
 
/* For the btree cache */
-   struct shrinker shrink;
+   struct shrinker *shrink;
 
/* For the btree cache and anything allocation related */
struct mutexbucket_lock;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index fd121a61f17c..ae5cbb55861f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -667,7 +667,7 @@ static int mca_reap(struct btree *b, unsigned int 
min_order, bool flush)
 static unsigned long bch_mca_scan(struct shrinker *shrink,
  struct shrink_control *sc)
 {
-   struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+   struct cache_set *c = shrink->private_data;
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
unsigned long freed = 0;
@@ -734,7 +734,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
 static unsigned long bch_mca_count(struct shrinker *shrink,
   struct shrink_control *sc)
 {
-   struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+   struct cache_set *c = shrink->private_data;
 
if (c->shrinker_disabled)
return 0;
@@ -752,8 +752,8 @@ void bch_btree_cache_free(struct cache_set *c)
 
closure_init_stack(&cl);
 
-   if (c->shrink.list.next)
-   unregister_shrinker(&c->shrink);
+   if (c->shrink)
+   shrinker_free(c->shrink);
 
mutex_lock(&c->bucket_lock);
 
@@ -828,14 +828,19 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->verify_data = NULL;
 #endif
 
-   c->shrink.count_objects = bch_mca_count;
-   c->shrink.scan_objects = bch_mca_scan;
-   c->shrink.seeks = 4;
-   c->shrink.batch = c->btree_pages * 2;
+   c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
+   if (!c->shrink) {
+   pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
+   return 0;
+   }
+
+   c->shrink->count_objects = bch_mca_count;
+   c->shrink->scan_objects = bch_mca_scan;
+   c->shrink->seeks = 4;
+   c->shrink->batch = c->btree_pages * 2;
+   c->shrink->private_data = c;
 
-   if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid))
-   pr_warn("bcache: %s: could not register shrinker\n",
-   __func__);
+   shrinker_register(c->shrink);
 
return 0;
 }
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 0e2c1880f60b..45d8af755de6 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -866,7 +866,8 @@ STORE(__bch_cache_set)
 
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
-   c->shrink.scan_objects(&c->shrink, &sc);
+   if (c->shrink)
+   c->shrink->scan_objects(c->shrink, &sc);
}
 
sysfs_strtoul_clamp(congested_read_threshold_us,
-- 
2.30.2



[Cluster-devel] [PATCH v4 28/48] md/raid5: dynamically allocate the md-raid5 shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the md-raid5 shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct r5conf.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/md/raid5.c | 26 +++---
 drivers/md/raid5.h |  2 +-
 2 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 32a87193bad7..e284c2f7dbe4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7401,7 +7401,7 @@ static void free_conf(struct r5conf *conf)
 
log_exit(conf);
 
-   unregister_shrinker(&conf->shrinker);
+   shrinker_free(conf->shrinker);
free_thread_groups(conf);
shrink_stripes(conf);
raid5_free_percpu(conf);
@@ -7449,7 +7449,7 @@ static int raid5_alloc_percpu(struct r5conf *conf)
 static unsigned long raid5_cache_scan(struct shrinker *shrink,
  struct shrink_control *sc)
 {
-   struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+   struct r5conf *conf = shrink->private_data;
unsigned long ret = SHRINK_STOP;
 
if (mutex_trylock(&conf->cache_size_mutex)) {
@@ -7470,7 +7470,7 @@ static unsigned long raid5_cache_scan(struct shrinker 
*shrink,
 static unsigned long raid5_cache_count(struct shrinker *shrink,
   struct shrink_control *sc)
 {
-   struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+   struct r5conf *conf = shrink->private_data;
 
if (conf->max_nr_stripes < conf->min_nr_stripes)
/* unlikely, but not impossible */
@@ -7705,18 +7705,22 @@ static struct r5conf *setup_conf(struct mddev *mddev)
 * it reduces the queue depth and so can hurt throughput.
 * So set it rather large, scaled by number of devices.
 */
-   conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
-   conf->shrinker.scan_objects = raid5_cache_scan;
-   conf->shrinker.count_objects = raid5_cache_count;
-   conf->shrinker.batch = 128;
-   conf->shrinker.flags = 0;
-   ret = register_shrinker(>shrinker, "md-raid5:%s", mdname(mddev));
-   if (ret) {
-   pr_warn("md/raid:%s: couldn't register shrinker.\n",
+   conf->shrinker = shrinker_alloc(0, "md-raid5:%s", mdname(mddev));
+   if (!conf->shrinker) {
+   ret = -ENOMEM;
+   pr_warn("md/raid:%s: couldn't allocate shrinker.\n",
mdname(mddev));
goto abort;
}
 
+   conf->shrinker->seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
+   conf->shrinker->scan_objects = raid5_cache_scan;
+   conf->shrinker->count_objects = raid5_cache_count;
+   conf->shrinker->batch = 128;
+   conf->shrinker->private_data = conf;
+
+   shrinker_register(conf->shrinker);
+
sprintf(pers_name, "raid%d", mddev->new_level);
rcu_assign_pointer(conf->thread,
   md_register_thread(raid5d, mddev, pers_name));
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 97a795979a35..22bea20eccbd 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -670,7 +670,7 @@ struct r5conf {
wait_queue_head_t   wait_for_stripe;
wait_queue_head_t   wait_for_overlap;
unsigned long   cache_state;
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
int pool_size; /* number of disks in stripeheads in 
pool */
spinlock_t  device_lock;
struct disk_info*disks;
-- 
2.30.2



[Cluster-devel] [PATCH v4 25/48] drm/panfrost: dynamically allocate the drm-panfrost shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the drm-panfrost shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct panfrost_device.
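
Because shrinker_alloc() can fail, the init helper changes from void to
int and panfrost_probe() grows an unwind step; condensed from the hunks
below:

    err = panfrost_gem_shrinker_init(ddev);     /* can now return -ENOMEM */
    if (err)
            goto err_out2;                      /* undoes drm_dev_register() */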

Signed-off-by: Qi Zheng 
Reviewed-by: Steven Price 
---
 drivers/gpu/drm/panfrost/panfrost_device.h|  2 +-
 drivers/gpu/drm/panfrost/panfrost_drv.c   |  6 +++-
 drivers/gpu/drm/panfrost/panfrost_gem.h   |  2 +-
 .../gpu/drm/panfrost/panfrost_gem_shrinker.c  | 30 +++
 4 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
b/drivers/gpu/drm/panfrost/panfrost_device.h
index b0126b9fbadc..e667e5689353 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -118,7 +118,7 @@ struct panfrost_device {
 
struct mutex shrinker_lock;
struct list_head shrinker_list;
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
 
struct panfrost_devfreq pfdevfreq;
 };
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a2ab99698ca8..e1d0e3a23757 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -601,10 +601,14 @@ static int panfrost_probe(struct platform_device *pdev)
if (err < 0)
goto err_out1;
 
-   panfrost_gem_shrinker_init(ddev);
+   err = panfrost_gem_shrinker_init(ddev);
+   if (err)
+   goto err_out2;
 
return 0;
 
+err_out2:
+   drm_dev_unregister(ddev);
 err_out1:
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h 
b/drivers/gpu/drm/panfrost/panfrost_gem.h
index ad2877eeeccd..863d2ec8d4f0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -81,7 +81,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
 void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
 void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
 
-void panfrost_gem_shrinker_init(struct drm_device *dev);
+int panfrost_gem_shrinker_init(struct drm_device *dev);
 void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
 
 #endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c 
b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 6a71a2555f85..3dfe2b7ccdd9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -18,8 +18,7 @@
 static unsigned long
 panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control 
*sc)
 {
-   struct panfrost_device *pfdev =
-   container_of(shrinker, struct panfrost_device, shrinker);
+   struct panfrost_device *pfdev = shrinker->private_data;
struct drm_gem_shmem_object *shmem;
unsigned long count = 0;
 
@@ -65,8 +64,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
 static unsigned long
 panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control 
*sc)
 {
-   struct panfrost_device *pfdev =
-   container_of(shrinker, struct panfrost_device, shrinker);
+   struct panfrost_device *pfdev = shrinker->private_data;
struct drm_gem_shmem_object *shmem, *tmp;
unsigned long freed = 0;
 
@@ -97,13 +95,22 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, 
struct shrink_control *sc)
  *
  * This function registers and sets up the panfrost shrinker.
  */
-void panfrost_gem_shrinker_init(struct drm_device *dev)
+int panfrost_gem_shrinker_init(struct drm_device *dev)
 {
struct panfrost_device *pfdev = dev->dev_private;
-   pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
-   pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
-   pfdev->shrinker.seeks = DEFAULT_SEEKS;
-   WARN_ON(register_shrinker(>shrinker, "drm-panfrost"));
+
+   pfdev->shrinker = shrinker_alloc(0, "drm-panfrost");
+   if (!pfdev->shrinker)
+   return -ENOMEM;
+
+   pfdev->shrinker->count_objects = panfrost_gem_shrinker_count;
+   pfdev->shrinker->scan_objects = panfrost_gem_shrinker_scan;
+   pfdev->shrinker->seeks = DEFAULT_SEEKS;
+   pfdev->shrinker->private_data = pfdev;
+
+   shrinker_register(pfdev->shrinker);
+
+   return 0;
 }
 
 /**
@@ -116,7 +123,6 @@ void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
 {
struct panfrost_device *pfdev = dev->dev_private;
 
-   if (pfdev->shrinker.nr_deferred) {
-   unregister_shrinker(&pfdev->shrinker);
-   }
+   if (pfdev->shrinker)
+   shrinker_free(pfdev->shrinker);
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 27/48] dm zoned: dynamically allocate the dm-zoned-meta shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the dm-zoned-meta shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct dmz_metadata.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/md/dm-zoned-metadata.c | 29 +
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 9d3cca8e3dc9..bbb0e69a7908 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -187,7 +187,7 @@ struct dmz_metadata {
struct rb_root  mblk_rbtree;
struct list_headmblk_lru_list;
struct list_headmblk_dirty_list;
-   struct shrinker mblk_shrinker;
+   struct shrinker *mblk_shrinker;
 
/* Zone allocation management */
struct mutexmap_lock;
@@ -615,7 +615,7 @@ static unsigned long dmz_shrink_mblock_cache(struct 
dmz_metadata *zmd,
 static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
   struct shrink_control *sc)
 {
-   struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, 
mblk_shrinker);
+   struct dmz_metadata *zmd = shrink->private_data;
 
return atomic_read(&zmd->nr_mblks);
 }
@@ -626,7 +626,7 @@ static unsigned long dmz_mblock_shrinker_count(struct 
shrinker *shrink,
 static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
  struct shrink_control *sc)
 {
-   struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, 
mblk_shrinker);
+   struct dmz_metadata *zmd = shrink->private_data;
unsigned long count;
 
spin_lock(&zmd->mblk_lock);
@@ -2936,19 +2936,24 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
 */
zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks 
* 16;
zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
-   zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
-   zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
-   zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
 
/* Metadata cache shrinker */
-   ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
-   MAJOR(dev->bdev->bd_dev),
-   MINOR(dev->bdev->bd_dev));
-   if (ret) {
-   dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
+   zmd->mblk_shrinker = shrinker_alloc(0, "dm-zoned-meta:(%u:%u)",
+   MAJOR(dev->bdev->bd_dev),
+   MINOR(dev->bdev->bd_dev));
+   if (!zmd->mblk_shrinker) {
+   ret = -ENOMEM;
+   dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
goto err;
}
 
+   zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
+   zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
+   zmd->mblk_shrinker->seeks = DEFAULT_SEEKS;
+   zmd->mblk_shrinker->private_data = zmd;
+
+   shrinker_register(zmd->mblk_shrinker);
+
dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
for (i = 0; i < zmd->nr_devs; i++)
dmz_print_dev(zmd, i);
@@ -2995,7 +3000,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
  */
 void dmz_dtr_metadata(struct dmz_metadata *zmd)
 {
-   unregister_shrinker(&zmd->mblk_shrinker);
+   shrinker_free(zmd->mblk_shrinker);
dmz_cleanup_metadata(zmd);
kfree(zmd);
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 26/48] dm: dynamically allocate the dm-bufio shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the dm-bufio shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct dm_bufio_client.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/md/dm-bufio.c | 28 +---
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index bc309e41d074..62eb27639c9b 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -963,7 +963,7 @@ struct dm_bufio_client {
 
sector_t start;
 
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
struct work_struct shrink_work;
atomic_long_t need_shrink;
 
@@ -2368,7 +2368,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker 
*shrink, struct shrink
 {
struct dm_bufio_client *c;
 
-   c = container_of(shrink, struct dm_bufio_client, shrinker);
+   c = shrink->private_data;
atomic_long_add(sc->nr_to_scan, &c->need_shrink);
queue_work(dm_bufio_wq, &c->shrink_work);
 
@@ -2377,7 +2377,7 @@ static unsigned long dm_bufio_shrink_scan(struct shrinker 
*shrink, struct shrink
 
 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct 
shrink_control *sc)
 {
-   struct dm_bufio_client *c = container_of(shrink, struct 
dm_bufio_client, shrinker);
+   struct dm_bufio_client *c = shrink->private_data;
unsigned long count = cache_total(&c->cache);
unsigned long retain_target = get_retain_buffers(c);
unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
@@ -2490,14 +2490,20 @@ struct dm_bufio_client *dm_bufio_client_create(struct 
block_device *bdev, unsign
INIT_WORK(&c->shrink_work, shrink_work);
atomic_long_set(&c->need_shrink, 0);
 
-   c->shrinker.count_objects = dm_bufio_shrink_count;
-   c->shrinker.scan_objects = dm_bufio_shrink_scan;
-   c->shrinker.seeks = 1;
-   c->shrinker.batch = 0;
-   r = register_shrinker(>shrinker, "dm-bufio:(%u:%u)",
- MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-   if (r)
+   c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
+MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+   if (!c->shrinker) {
+   r = -ENOMEM;
goto bad;
+   }
+
+   c->shrinker->count_objects = dm_bufio_shrink_count;
+   c->shrinker->scan_objects = dm_bufio_shrink_scan;
+   c->shrinker->seeks = 1;
+   c->shrinker->batch = 0;
+   c->shrinker->private_data = c;
+
+   shrinker_register(c->shrinker);
 
mutex_lock(&dm_bufio_clients_lock);
dm_bufio_client_count++;
@@ -2537,7 +2543,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
 
drop_buffers(c);
 
-   unregister_shrinker(&c->shrinker);
+   shrinker_free(c->shrinker);
flush_work(&c->shrink_work);
 
mutex_lock(&dm_bufio_clients_lock);
-- 
2.30.2



[Cluster-devel] [PATCH v4 24/48] drm/msm: dynamically allocate the drm-msm_gem shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the drm-msm_gem shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct msm_drm_private.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/gpu/drm/msm/msm_drv.c  |  4 ++-
 drivers/gpu/drm/msm/msm_drv.h  |  4 +--
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 34 --
 3 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4bd028fa7500..7f20249d6071 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -462,7 +462,9 @@ static int msm_drm_init(struct device *dev, const struct 
drm_driver *drv)
if (ret)
goto err_msm_uninit;
 
-   msm_gem_shrinker_init(ddev);
+   ret = msm_gem_shrinker_init(ddev);
+   if (ret)
+   goto err_msm_uninit;
 
if (priv->kms_init) {
ret = priv->kms_init(ddev);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 02fd6c7d0bb7..e2fc56f161b5 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -221,7 +221,7 @@ struct msm_drm_private {
} vram;
 
struct notifier_block vmap_notifier;
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
 
struct drm_atomic_state *pm_state;
 
@@ -283,7 +283,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long 
nr_to_scan);
 #endif
 
-void msm_gem_shrinker_init(struct drm_device *dev);
+int msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c 
b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index f38296ad8743..2063e4f8 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -34,8 +34,7 @@ static bool can_block(struct shrink_control *sc)
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-   struct msm_drm_private *priv =
-   container_of(shrinker, struct msm_drm_private, shrinker);
+   struct msm_drm_private *priv = shrinker->private_data;
unsigned count = priv->lru.dontneed.count;
 
if (can_swap())
@@ -100,8 +99,7 @@ active_evict(struct drm_gem_object *obj)
 static unsigned long
 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-   struct msm_drm_private *priv =
-   container_of(shrinker, struct msm_drm_private, shrinker);
+   struct msm_drm_private *priv = shrinker->private_data;
struct {
struct drm_gem_lru *lru;
bool (*shrink)(struct drm_gem_object *obj);
@@ -148,10 +146,11 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned 
long nr_to_scan)
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
};
-   int ret;
+   unsigned long ret = SHRINK_STOP;
 
fs_reclaim_acquire(GFP_KERNEL);
-   ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
+   if (priv->shrinker)
+   ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
fs_reclaim_release(GFP_KERNEL);
 
return ret;
@@ -210,16 +209,25 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned 
long event, void *ptr)
  *
  * This function registers and sets up the msm shrinker.
  */
-void msm_gem_shrinker_init(struct drm_device *dev)
+int msm_gem_shrinker_init(struct drm_device *dev)
 {
struct msm_drm_private *priv = dev->dev_private;
-   priv->shrinker.count_objects = msm_gem_shrinker_count;
-   priv->shrinker.scan_objects = msm_gem_shrinker_scan;
-   priv->shrinker.seeks = DEFAULT_SEEKS;
-   WARN_ON(register_shrinker(>shrinker, "drm-msm_gem"));
+
+   priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
+   if (!priv->shrinker)
+   return -ENOMEM;
+
+   priv->shrinker->count_objects = msm_gem_shrinker_count;
+   priv->shrinker->scan_objects = msm_gem_shrinker_scan;
+   priv->shrinker->seeks = DEFAULT_SEEKS;
+   priv->shrinker->private_data = priv;
+
+   shrinker_register(priv->shrinker);
 
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+
+   return 0;
 }
 
 /**
@@ -232,8 +240,8 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
struct msm_drm_private *priv = dev->dev_private;
 
-   if (priv->shrinker.nr_deferred) {
+   if (priv->shrinker) {
WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
-   unregister_shrinker(&priv->shrinker);
+   shrinker_free(priv->shrinker);
	}
}
-- 
2.30.2


[Cluster-devel] [PATCH v4 23/48] drm/i915: dynamically allocate the i915_gem_mm shrinker

2023-08-07 Thread Qi Zheng
In preparation for implementing lockless slab shrink, use new APIs to
dynamically allocate the i915_gem_mm shrinker, so that it can be freed
asynchronously using kfree_rcu(). Then it doesn't need to wait for RCU
read-side critical sections when releasing the struct drm_i915_private.
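
Unlike most conversions in this series, i915 keeps driver load going when
the shrinker cannot be allocated: the registration helper just warns and
skips shrinker setup. Condensed from the hunk below:

    i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
    if (!i915->mm.shrinker) {
            drm_WARN_ON(&i915->drm, 1);         /* warn, but don't fail load */
    } else {
            /* fill callbacks, batch and private_data, then go live */
            shrinker_register(i915->mm.shrinker);
    }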

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 30 +++-
 drivers/gpu/drm/i915/i915_drv.h  |  2 +-
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 214763942aa2..4504eb4f31d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -284,8 +284,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private 
*i915)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-   struct drm_i915_private *i915 =
-   container_of(shrinker, struct drm_i915_private, mm.shrinker);
+   struct drm_i915_private *i915 = shrinker->private_data;
unsigned long num_objects;
unsigned long count;
 
@@ -302,8 +301,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct 
shrink_control *sc)
if (num_objects) {
unsigned long avg = 2 * count / num_objects;
 
-   i915->mm.shrinker.batch =
-   max((i915->mm.shrinker.batch + avg) >> 1,
+   i915->mm.shrinker->batch =
+   max((i915->mm.shrinker->batch + avg) >> 1,
128ul /* default SHRINK_BATCH */);
}
 
@@ -313,8 +312,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct 
shrink_control *sc)
 static unsigned long
 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 {
-   struct drm_i915_private *i915 =
-   container_of(shrinker, struct drm_i915_private, mm.shrinker);
+   struct drm_i915_private *i915 = shrinker->private_data;
unsigned long freed;
 
sc->nr_scanned = 0;
@@ -422,12 +420,18 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, 
unsigned long event, void *ptr
 
 void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
 {
-   i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-   i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
-   i915->mm.shrinker.seeks = DEFAULT_SEEKS;
-   i915->mm.shrinker.batch = 4096;
-   drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
- "drm-i915_gem"));
+   i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
+   if (!i915->mm.shrinker) {
+   drm_WARN_ON(&i915->drm, 1);
+   } else {
+   i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
+   i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
+   i915->mm.shrinker->seeks = DEFAULT_SEEKS;
+   i915->mm.shrinker->batch = 4096;
+   i915->mm.shrinker->private_data = i915;
+
+   shrinker_register(i915->mm.shrinker);
+   }
 
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
@@ -443,7 +447,7 @@ void i915_gem_driver_unregister__shrinker(struct 
drm_i915_private *i915)
unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
drm_WARN_ON(&i915->drm,
unregister_oom_notifier(&i915->mm.oom_notifier));
-   unregister_shrinker(&i915->mm.shrinker);
+   shrinker_free(i915->mm.shrinker);
 }
 
 void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 682ef2b5c7d5..389e8bf140d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -163,7 +163,7 @@ struct i915_gem_mm {
 
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
-   struct shrinker shrinker;
+   struct shrinker *shrinker;
 
 #ifdef CONFIG_MMU_NOTIFIER
/**
-- 
2.30.2



[Cluster-devel] [PATCH v4 22/48] mm: workingset: dynamically allocate the mm-shadow shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the mm-shadow shrinker.
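
One detail worth calling out in the hunk below: ordering. The shrinker has
to exist before __list_lru_init(), which ties the memcg-aware LRU to the
shrinker's ID, while shrinker_register() is deferred until the callbacks
are filled in. In outline (error handling trimmed):

    shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
                              "mm-shadow");         /* 1. allocate (assigns ID) */
    ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
                          shrinker);                /* 2. tie LRU to shrinker */
    shrinker->count_objects = count_shadow_nodes;   /* 3. fill callbacks */
    shrinker->scan_objects = scan_shadow_nodes;
    shrinker_register(shrinker);                    /* 4. go live last */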

Signed-off-by: Qi Zheng 
---
 mm/workingset.c | 27 ++-
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/mm/workingset.c b/mm/workingset.c
index da58a26d0d4d..3c53138903a7 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -763,13 +763,6 @@ static unsigned long scan_shadow_nodes(struct shrinker 
*shrinker,
NULL);
 }
 
-static struct shrinker workingset_shadow_shrinker = {
-   .count_objects = count_shadow_nodes,
-   .scan_objects = scan_shadow_nodes,
-   .seeks = 0, /* ->count reports only fully expendable nodes */
-   .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
-};
-
 /*
  * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
  * i_pages lock.
@@ -778,9 +771,10 @@ static struct lock_class_key shadow_nodes_key;
 
 static int __init workingset_init(void)
 {
+   struct shrinker *workingset_shadow_shrinker;
unsigned int timestamp_bits;
unsigned int max_order;
-   int ret;
+   int ret = -ENOMEM;
 
BUILD_BUG_ON(BITS_PER_LONG < EVICTION_SHIFT);
/*
@@ -797,17 +791,24 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
   timestamp_bits, max_order, bucket_order);
 
-   ret = prealloc_shrinker(&workingset_shadow_shrinker, "mm-shadow");
-   if (ret)
+   workingset_shadow_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+   SHRINKER_MEMCG_AWARE,
+   "mm-shadow");
+   if (!workingset_shadow_shrinker)
goto err;
+
ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key,
- &workingset_shadow_shrinker);
+ workingset_shadow_shrinker);
if (ret)
goto err_list_lru;
-   register_shrinker_prepared(&workingset_shadow_shrinker);
+
+   workingset_shadow_shrinker->count_objects = count_shadow_nodes;
+   workingset_shadow_shrinker->scan_objects = scan_shadow_nodes;
+
+   shrinker_register(workingset_shadow_shrinker);
return 0;
 err_list_lru:
-   free_prealloced_shrinker(&workingset_shadow_shrinker);
+   shrinker_free(workingset_shadow_shrinker);
 err:
return ret;
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 21/48] sunrpc: dynamically allocate the sunrpc_cred shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the sunrpc_cred shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 net/sunrpc/auth.c | 21 +
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 2f16f9d17966..0cc52e39f859 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -861,11 +861,7 @@ rpcauth_uptodatecred(struct rpc_task *task)
test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
 }
 
-static struct shrinker rpc_cred_shrinker = {
-   .count_objects = rpcauth_cache_shrink_count,
-   .scan_objects = rpcauth_cache_shrink_scan,
-   .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *rpc_cred_shrinker;
 
 int __init rpcauth_init_module(void)
 {
@@ -874,9 +870,18 @@ int __init rpcauth_init_module(void)
err = rpc_init_authunix();
if (err < 0)
goto out1;
-   err = register_shrinker(_cred_shrinker, "sunrpc_cred");
-   if (err < 0)
+   rpc_cred_shrinker = shrinker_alloc(0, "sunrpc_cred");
+   if (!rpc_cred_shrinker) {
+   err = -ENOMEM;
goto out2;
+   }
+
+   rpc_cred_shrinker->count_objects = rpcauth_cache_shrink_count;
+   rpc_cred_shrinker->scan_objects = rpcauth_cache_shrink_scan;
+   rpc_cred_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(rpc_cred_shrinker);
+
return 0;
 out2:
rpc_destroy_authunix();
@@ -887,5 +892,5 @@ int __init rpcauth_init_module(void)
 void rpcauth_remove_module(void)
 {
rpc_destroy_authunix();
-   unregister_shrinker(&rpc_cred_shrinker);
+   shrinker_free(rpc_cred_shrinker);
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 20/48] mm: thp: dynamically allocate the thp-related shrinkers

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the thp-zero and thp-deferred_split
shrinkers.
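
The two shrinkers are set up as a unit: the former .flags initializers
move into the first argument of shrinker_alloc(), failure of the second
allocation frees the first, and thp_shrinker_exit() frees both. In
outline (condensed from the hunks below):

    deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
                                             SHRINKER_MEMCG_AWARE |
                                             SHRINKER_NONSLAB,
                                             "thp-deferred_split");
    if (!deferred_split_shrinker) {
            shrinker_free(huge_zero_page_shrinker); /* undo partial init */
            return -ENOMEM;
    }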

Signed-off-by: Qi Zheng 
---
 mm/huge_memory.c | 69 +++-
 1 file changed, 45 insertions(+), 24 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 947001a7cd42..5d0c7a0b651c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
-static struct shrinker deferred_split_shrinker;
+static struct shrinker *huge_zero_page_shrinker;
+static struct shrinker *deferred_split_shrinker;
+
+static int __init thp_shrinker_init(void)
+{
+   huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
+   if (!huge_zero_page_shrinker)
+   return -ENOMEM;
+
+   deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+SHRINKER_MEMCG_AWARE |
+SHRINKER_NONSLAB,
+"thp-deferred_split");
+   if (!deferred_split_shrinker) {
+   shrinker_free(huge_zero_page_shrinker);
+   return -ENOMEM;
+   }
+
+   huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
+   huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
+   huge_zero_page_shrinker->seeks = DEFAULT_SEEKS;
+   shrinker_register(huge_zero_page_shrinker);
+
+   deferred_split_shrinker->count_objects = deferred_split_count;
+   deferred_split_shrinker->scan_objects = deferred_split_scan;
+   deferred_split_shrinker->seeks = DEFAULT_SEEKS;
+   shrinker_register(deferred_split_shrinker);
+
+   return 0;
+}
+
+static void __init thp_shrinker_exit(void)
+{
+   shrinker_free(huge_zero_page_shrinker);
+   shrinker_free(deferred_split_shrinker);
+}
+
 static int __init hugepage_init(void)
 {
int err;
@@ -482,12 +516,9 @@ static int __init hugepage_init(void)
if (err)
goto err_slab;
 
-   err = register_shrinker(_zero_page_shrinker, "thp-zero");
-   if (err)
-   goto err_hzp_shrinker;
-   err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
+   err = thp_shrinker_init();
if (err)
-   goto err_split_shrinker;
+   goto err_shrinker;
 
/*
 * By default disable transparent hugepages on smaller systems,
@@ -505,10 +536,8 @@ static int __init hugepage_init(void)
 
return 0;
 err_khugepaged:
-   unregister_shrinker(&deferred_split_shrinker);
-err_split_shrinker:
-   unregister_shrinker(&huge_zero_page_shrinker);
-err_hzp_shrinker:
+   thp_shrinker_exit();
+err_shrinker:
khugepaged_destroy();
 err_slab:
hugepage_exit_sysfs(hugepage_kobj);
@@ -2834,7 +2863,7 @@ void deferred_split_folio(struct folio *folio)
 #ifdef CONFIG_MEMCG
if (memcg)
set_shrinker_bit(memcg, folio_nid(folio),
-deferred_split_shrinker.id);
+deferred_split_shrinker->id);
 #endif
}
spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
@@ -2908,14 +2937,6 @@ static unsigned long deferred_split_scan(struct shrinker 
*shrink,
return split;
 }
 
-static struct shrinker deferred_split_shrinker = {
-   .count_objects = deferred_split_count,
-   .scan_objects = deferred_split_scan,
-   .seeks = DEFAULT_SEEKS,
-   .flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
-SHRINKER_NONSLAB,
-};
-
 #ifdef CONFIG_DEBUG_FS
 static void split_huge_pages_all(void)
 {
-- 
2.30.2



[Cluster-devel] [PATCH v4 19/48] rcu: dynamically allocate the rcu-kfree shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the rcu-kfree shrinker.

Signed-off-by: Qi Zheng 
---
 kernel/rcu/tree.c | 22 +-
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7c79480bfaa0..3b20fc46c514 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3449,13 +3449,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct 
shrink_control *sc)
return freed == 0 ? SHRINK_STOP : freed;
 }
 
-static struct shrinker kfree_rcu_shrinker = {
-   .count_objects = kfree_rcu_shrink_count,
-   .scan_objects = kfree_rcu_shrink_scan,
-   .batch = 0,
-   .seeks = DEFAULT_SEEKS,
-};
-
 void __init kfree_rcu_scheduler_running(void)
 {
int cpu;
@@ -4931,6 +4924,7 @@ static void __init kfree_rcu_batch_init(void)
 {
int cpu;
int i, j;
+   struct shrinker *kfree_rcu_shrinker;
 
/* Clamp it to [0:100] seconds interval. */
if (rcu_delay_page_cache_fill_msec < 0 ||
@@ -4962,8 +4956,18 @@ static void __init kfree_rcu_batch_init(void)
INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
krcp->initialized = true;
}
-   if (register_shrinker(_rcu_shrinker, "rcu-kfree"))
-   pr_err("Failed to register kfree_rcu() shrinker!\n");
+
+   kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
+   if (!kfree_rcu_shrinker) {
+   pr_err("Failed to allocate kfree_rcu() shrinker!\n");
+   return;
+   }
+
+   kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
+   kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
+   kfree_rcu_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(kfree_rcu_shrinker);
 }
 
 void __init rcu_init(void)
-- 
2.30.2



[Cluster-devel] [PATCH v4 18/48] rcu: dynamically allocate the rcu-lazy shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the rcu-lazy shrinker.

Signed-off-by: Qi Zheng 
---
 kernel/rcu/tree_nocb.h | 20 +++-
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index 5598212d1f27..e1c59c33738a 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1396,13 +1396,6 @@ lazy_rcu_shrink_scan(struct shrinker *shrink, struct 
shrink_control *sc)
 
return count ? count : SHRINK_STOP;
 }
-
-static struct shrinker lazy_rcu_shrinker = {
-   .count_objects = lazy_rcu_shrink_count,
-   .scan_objects = lazy_rcu_shrink_scan,
-   .batch = 0,
-   .seeks = DEFAULT_SEEKS,
-};
 #endif // #ifdef CONFIG_RCU_LAZY
 
 void __init rcu_init_nohz(void)
@@ -1410,6 +1403,7 @@ void __init rcu_init_nohz(void)
int cpu;
struct rcu_data *rdp;
const struct cpumask *cpumask = NULL;
+   struct shrinker * __maybe_unused lazy_rcu_shrinker;
 
 #if defined(CONFIG_NO_HZ_FULL)
if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
@@ -1436,8 +1430,16 @@ void __init rcu_init_nohz(void)
return;
 
 #ifdef CONFIG_RCU_LAZY
-   if (register_shrinker(_rcu_shrinker, "rcu-lazy"))
-   pr_err("Failed to register lazy_rcu shrinker!\n");
+   lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
+   if (!lazy_rcu_shrinker) {
+   pr_err("Failed to allocate lazy_rcu shrinker!\n");
+   } else {
+   lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
+   lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;
+   lazy_rcu_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(lazy_rcu_shrinker);
+   }
 #endif // #ifdef CONFIG_RCU_LAZY
 
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
-- 
2.30.2



[Cluster-devel] [PATCH v4 17/48] ubifs: dynamically allocate the ubifs-slab shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the ubifs-slab shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/ubifs/super.c | 22 --
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b08fb28d16b5..c690782388a8 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -54,11 +54,7 @@ module_param_cb(default_version, &ubifs_default_version_ops, &ubifs_default_version, 0600);
 static struct kmem_cache *ubifs_inode_slab;
 
 /* UBIFS TNC shrinker description */
-static struct shrinker ubifs_shrinker_info = {
-   .scan_objects = ubifs_shrink_scan,
-   .count_objects = ubifs_shrink_count,
-   .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *ubifs_shrinker_info;
 
 /**
  * validate_inode - validate inode.
@@ -2373,7 +2369,7 @@ static void inode_slab_ctor(void *obj)
 
 static int __init ubifs_init(void)
 {
-   int err;
+   int err = -ENOMEM;
 
BUILD_BUG_ON(sizeof(struct ubifs_ch) != 24);
 
@@ -2439,10 +2435,16 @@ static int __init ubifs_init(void)
if (!ubifs_inode_slab)
return -ENOMEM;
 
-   err = register_shrinker(_shrinker_info, "ubifs-slab");
-   if (err)
+   ubifs_shrinker_info = shrinker_alloc(0, "ubifs-slab");
+   if (!ubifs_shrinker_info)
goto out_slab;
 
+   ubifs_shrinker_info->count_objects = ubifs_shrink_count;
+   ubifs_shrinker_info->scan_objects = ubifs_shrink_scan;
+   ubifs_shrinker_info->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(ubifs_shrinker_info);
+
err = ubifs_compressors_init();
if (err)
goto out_shrinker;
@@ -2467,7 +2469,7 @@ static int __init ubifs_init(void)
dbg_debugfs_exit();
ubifs_compressors_exit();
 out_shrinker:
-   unregister_shrinker(&ubifs_shrinker_info);
+   shrinker_free(ubifs_shrinker_info);
 out_slab:
kmem_cache_destroy(ubifs_inode_slab);
return err;
@@ -2483,7 +2485,7 @@ static void __exit ubifs_exit(void)
dbg_debugfs_exit();
ubifs_sysfs_exit();
ubifs_compressors_exit();
-   unregister_shrinker(&ubifs_shrinker_info);
+   shrinker_free(ubifs_shrinker_info);
 
/*
 * Make sure all delayed rcu free inodes are flushed before we
-- 
2.30.2



[Cluster-devel] [PATCH v4 16/48] quota: dynamically allocate the dquota-cache shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the dquota-cache shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/quota/dquot.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9e72bfe8bbad..c303cffdf433 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -791,12 +791,6 @@ dqcache_shrink_count(struct shrinker *shrink, struct 
shrink_control *sc)
percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
 }
 
-static struct shrinker dqcache_shrinker = {
-   .count_objects = dqcache_shrink_count,
-   .scan_objects = dqcache_shrink_scan,
-   .seeks = DEFAULT_SEEKS,
-};
-
 /*
  * Safely release dquot and put reference to dquot.
  */
@@ -2956,6 +2950,7 @@ static int __init dquot_init(void)
 {
int i, ret;
unsigned long nr_hash, order;
+   struct shrinker *dqcache_shrinker;
 
printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);
 
@@ -2990,8 +2985,15 @@ static int __init dquot_init(void)
pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
 
-   if (register_shrinker(_shrinker, "dquota-cache"))
-   panic("Cannot register dquot shrinker");
+   dqcache_shrinker = shrinker_alloc(0, "dquota-cache");
+   if (!dqcache_shrinker)
+   panic("Cannot allocate dquot shrinker");
+
+   dqcache_shrinker->count_objects = dqcache_shrink_count;
+   dqcache_shrinker->scan_objects = dqcache_shrink_scan;
+   dqcache_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(dqcache_shrinker);
 
return 0;
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 15/48] nfsd: dynamically allocate the nfsd-filecache shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the nfsd-filecache shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/nfsd/filecache.c | 23 +--
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index ee9c923192e0..9c62b4502539 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -521,11 +521,7 @@ nfsd_file_lru_scan(struct shrinker *s, struct 
shrink_control *sc)
return ret;
 }
 
-static struct shrinker nfsd_file_shrinker = {
-   .scan_objects = nfsd_file_lru_scan,
-   .count_objects = nfsd_file_lru_count,
-   .seeks = 1,
-};
+static struct shrinker *nfsd_file_shrinker;
 
 /**
  * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file
@@ -746,12 +742,19 @@ nfsd_file_cache_init(void)
goto out_err;
}
 
-   ret = register_shrinker(_file_shrinker, "nfsd-filecache");
-   if (ret) {
-   pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", 
ret);
+   nfsd_file_shrinker = shrinker_alloc(0, "nfsd-filecache");
+   if (!nfsd_file_shrinker) {
+   ret = -ENOMEM;
+   pr_err("nfsd: failed to allocate nfsd_file_shrinker\n");
goto out_lru;
}
 
+   nfsd_file_shrinker->count_objects = nfsd_file_lru_count;
+   nfsd_file_shrinker->scan_objects = nfsd_file_lru_scan;
+   nfsd_file_shrinker->seeks = 1;
+
+   shrinker_register(nfsd_file_shrinker);
+
ret = lease_register_notifier(&nfsd_file_lease_notifier);
if (ret) {
pr_err("nfsd: unable to register lease notifier: %d\n", ret);
@@ -774,7 +777,7 @@ nfsd_file_cache_init(void)
 out_notifier:
lease_unregister_notifier(&nfsd_file_lease_notifier);
 out_shrinker:
-   unregister_shrinker(&nfsd_file_shrinker);
+   shrinker_free(nfsd_file_shrinker);
 out_lru:
list_lru_destroy(&nfsd_file_lru);
 out_err:
@@ -891,7 +894,7 @@ nfsd_file_cache_shutdown(void)
return;
 
lease_unregister_notifier(&nfsd_file_lease_notifier);
-   unregister_shrinker(&nfsd_file_shrinker);
+   shrinker_free(nfsd_file_shrinker);
/*
 * make sure all callers of nfsd_file_lru_cb are done before
 * calling nfsd_file_cache_purge
-- 
2.30.2



[Cluster-devel] [PATCH v4 14/48] nfs: dynamically allocate the nfs-acl shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the nfs-acl shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/nfs/super.c | 22 ++
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2284f749d892..1b5cd0444dda 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -129,11 +129,7 @@ static void nfs_ssc_unregister_ops(void)
 }
 #endif /* CONFIG_NFS_V4_2 */
 
-static struct shrinker acl_shrinker = {
-   .count_objects  = nfs_access_cache_count,
-   .scan_objects   = nfs_access_cache_scan,
-   .seeks  = DEFAULT_SEEKS,
-};
+static struct shrinker *acl_shrinker;
 
 /*
  * Register the NFS filesystems
@@ -153,9 +149,19 @@ int __init register_nfs_fs(void)
ret = nfs_register_sysctl();
if (ret < 0)
goto error_2;
-   ret = register_shrinker(_shrinker, "nfs-acl");
-   if (ret < 0)
+
+   acl_shrinker = shrinker_alloc(0, "nfs-acl");
+   if (!acl_shrinker) {
+   ret = -ENOMEM;
goto error_3;
+   }
+
+   acl_shrinker->count_objects = nfs_access_cache_count;
+   acl_shrinker->scan_objects = nfs_access_cache_scan;
+   acl_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(acl_shrinker);
+
 #ifdef CONFIG_NFS_V4_2
nfs_ssc_register_ops();
 #endif
@@ -175,7 +181,7 @@ int __init register_nfs_fs(void)
  */
 void __exit unregister_nfs_fs(void)
 {
-   unregister_shrinker(&acl_shrinker);
+   shrinker_free(acl_shrinker);
nfs_unregister_sysctl();
unregister_nfs4_fs();
 #ifdef CONFIG_NFS_V4_2
-- 
2.30.2



[Cluster-devel] [PATCH v4 13/48] NFSv4.2: dynamically allocate the nfs-xattr shrinkers

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the nfs-xattr shrinkers.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/nfs/nfs42xattr.c | 87 +++--
 1 file changed, 44 insertions(+), 43 deletions(-)

diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index 911f634ba3da..2ad66a8922f4 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -796,28 +796,9 @@ static unsigned long nfs4_xattr_cache_scan(struct shrinker 
*shrink,
 static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
   struct shrink_control *sc);
 
-static struct shrinker nfs4_xattr_cache_shrinker = {
-   .count_objects  = nfs4_xattr_cache_count,
-   .scan_objects   = nfs4_xattr_cache_scan,
-   .seeks  = DEFAULT_SEEKS,
-   .flags  = SHRINKER_MEMCG_AWARE,
-};
-
-static struct shrinker nfs4_xattr_entry_shrinker = {
-   .count_objects  = nfs4_xattr_entry_count,
-   .scan_objects   = nfs4_xattr_entry_scan,
-   .seeks  = DEFAULT_SEEKS,
-   .batch  = 512,
-   .flags  = SHRINKER_MEMCG_AWARE,
-};
-
-static struct shrinker nfs4_xattr_large_entry_shrinker = {
-   .count_objects  = nfs4_xattr_entry_count,
-   .scan_objects   = nfs4_xattr_entry_scan,
-   .seeks  = 1,
-   .batch  = 512,
-   .flags  = SHRINKER_MEMCG_AWARE,
-};
+static struct shrinker *nfs4_xattr_cache_shrinker;
+static struct shrinker *nfs4_xattr_entry_shrinker;
+static struct shrinker *nfs4_xattr_large_entry_shrinker;
 
 static enum lru_status
 cache_lru_isolate(struct list_head *item,
@@ -943,7 +924,7 @@ nfs4_xattr_entry_scan(struct shrinker *shrink, struct 
shrink_control *sc)
struct nfs4_xattr_entry *entry;
struct list_lru *lru;
 
-   lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+   lru = (shrink == nfs4_xattr_large_entry_shrinker) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
 
freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);
@@ -971,7 +952,7 @@ nfs4_xattr_entry_count(struct shrinker *shrink, struct 
shrink_control *sc)
unsigned long count;
struct list_lru *lru;
 
-   lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
+   lru = (shrink == nfs4_xattr_large_entry_shrinker) ?
&nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;
 
count = list_lru_shrink_count(lru, sc);
@@ -991,18 +972,34 @@ static void nfs4_xattr_cache_init_once(void *p)
INIT_LIST_HEAD(&cache->dispose);
 }
 
-static int nfs4_xattr_shrinker_init(struct shrinker *shrinker,
-   struct list_lru *lru, const char *name)
+typedef unsigned long (*count_objects_cb)(struct shrinker *s,
+ struct shrink_control *sc);
+typedef unsigned long (*scan_objects_cb)(struct shrinker *s,
+struct shrink_control *sc);
+
+static int __init nfs4_xattr_shrinker_init(struct shrinker **shrinker,
+  struct list_lru *lru, const char 
*name,
+  count_objects_cb count,
+  scan_objects_cb scan, long batch, 
int seeks)
 {
-   int ret = 0;
+   int ret;
 
-   ret = register_shrinker(shrinker, name);
-   if (ret)
+   *shrinker = shrinker_alloc(SHRINKER_MEMCG_AWARE, name);
+   if (!*shrinker)
+   return -ENOMEM;
+
+   ret = list_lru_init_memcg(lru, *shrinker);
+   if (ret) {
+   shrinker_free(*shrinker);
return ret;
+   }
 
-   ret = list_lru_init_memcg(lru, shrinker);
-   if (ret)
-   unregister_shrinker(shrinker);
+   (*shrinker)->count_objects = count;
+   (*shrinker)->scan_objects = scan;
+   (*shrinker)->batch = batch;
+   (*shrinker)->seeks = seeks;
+
+   shrinker_register(*shrinker);
 
return ret;
 }
@@ -1010,7 +1007,7 @@ static int nfs4_xattr_shrinker_init(struct shrinker 
*shrinker,
 static void nfs4_xattr_shrinker_destroy(struct shrinker *shrinker,
struct list_lru *lru)
 {
-   unregister_shrinker(shrinker);
+   shrinker_free(shrinker);
list_lru_destroy(lru);
 }
 
@@ -1026,27 +1023,31 @@ int __init nfs4_xattr_cache_init(void)
return -ENOMEM;
 
ret = nfs4_xattr_shrinker_init(&nfs4_xattr_cache_shrinker,
-  &nfs4_xattr_cache_lru,
-  "nfs-xattr_cache");
+  &nfs4_xattr_cache_lru, "nfs-xattr_cache",
+  nfs4_xattr_cache_count,
+  nfs4_xattr_cache_scan, 0, DEFAULT_SEEKS);
if (ret)
goto out1;
 
ret = nfs4_xattr_shrinker_init(&nfs4_xattr_entry_shrinker,
-  &nfs4_xattr_entry_lru,
-

[Cluster-devel] [PATCH v4 12/48] gfs2: dynamically allocate the gfs2-qd shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the gfs2-qd shrinker.
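
With the shrinker no longer a global struct that main.c can embed or take
the address of, the patch hides it behind an init/exit pair declared in
quota.h. In outline:

    /* fs/gfs2/quota.c: the object becomes file-local */
    static struct shrinker *gfs2_qd_shrinker;

    int __init gfs2_qd_shrinker_init(void)      /* called from init_gfs2_fs() */
    {
            gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
            if (!gfs2_qd_shrinker)
                    return -ENOMEM;
            /* fill count/scan callbacks, then shrinker_register() */
            return 0;
    }

    void gfs2_qd_shrinker_exit(void)            /* called from the exit paths */
    {
            shrinker_free(gfs2_qd_shrinker);
    }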

Signed-off-by: Qi Zheng 
---
 fs/gfs2/main.c  |  6 +++---
 fs/gfs2/quota.c | 26 --
 fs/gfs2/quota.h |  3 ++-
 3 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index afcb32854f14..e47b1cc79f59 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -147,7 +147,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_trans_cachep)
goto fail_cachep8;
 
-   error = register_shrinker(_qd_shrinker, "gfs2-qd");
+   error = gfs2_qd_shrinker_init();
if (error)
goto fail_shrinker;
 
@@ -196,7 +196,7 @@ static int __init init_gfs2_fs(void)
 fail_wq2:
destroy_workqueue(gfs_recovery_wq);
 fail_wq1:
-   unregister_shrinker(&gfs2_qd_shrinker);
+   gfs2_qd_shrinker_exit();
 fail_shrinker:
kmem_cache_destroy(gfs2_trans_cachep);
 fail_cachep8:
@@ -229,7 +229,7 @@ static int __init init_gfs2_fs(void)
 
 static void __exit exit_gfs2_fs(void)
 {
-   unregister_shrinker(&gfs2_qd_shrinker);
+   gfs2_qd_shrinker_exit();
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(_fs_type);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 632806c5ed67..d1e4d8ab8fa1 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -186,13 +186,27 @@ static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
	return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc));
 }
 
-struct shrinker gfs2_qd_shrinker = {
-   .count_objects = gfs2_qd_shrink_count,
-   .scan_objects = gfs2_qd_shrink_scan,
-   .seeks = DEFAULT_SEEKS,
-   .flags = SHRINKER_NUMA_AWARE,
-};
+static struct shrinker *gfs2_qd_shrinker;
+
+int __init gfs2_qd_shrinker_init(void)
+{
+   gfs2_qd_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "gfs2-qd");
+   if (!gfs2_qd_shrinker)
+   return -ENOMEM;
+
+   gfs2_qd_shrinker->count_objects = gfs2_qd_shrink_count;
+   gfs2_qd_shrinker->scan_objects = gfs2_qd_shrink_scan;
+   gfs2_qd_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(gfs2_qd_shrinker);
 
+   return 0;
+}
+
+void gfs2_qd_shrinker_exit(void)
+{
+   shrinker_free(gfs2_qd_shrinker);
+}
 
 static u64 qd2index(struct gfs2_quota_data *qd)
 {
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 21ada332d555..f0d54dcbbc75 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -59,7 +59,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
 }
 
 extern const struct quotactl_ops gfs2_quotactl_ops;
-extern struct shrinker gfs2_qd_shrinker;
+int __init gfs2_qd_shrinker_init(void);
+void gfs2_qd_shrinker_exit(void);
 extern struct list_lru gfs2_qd_lru;
 extern void __init gfs2_quota_hash_init(void);
 
-- 
2.30.2



[Cluster-devel] [PATCH v4 11/48] gfs2: dynamically allocate the gfs2-glock shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the gfs2-glock shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/gfs2/glock.c | 20 +++-
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1438e7465e30..8d582ba7514f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -2046,11 +2046,7 @@ static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
	return vfs_pressure_ratio(atomic_read(&lru_count));
 }
 
-static struct shrinker glock_shrinker = {
-   .seeks = DEFAULT_SEEKS,
-   .count_objects = gfs2_glock_shrink_count,
-   .scan_objects = gfs2_glock_shrink_scan,
-};
+static struct shrinker *glock_shrinker;
 
 /**
  * glock_hash_walk - Call a function for glock in a hash bucket
@@ -2472,13 +2468,19 @@ int __init gfs2_glock_init(void)
return -ENOMEM;
}
 
-   ret = register_shrinker(&glock_shrinker, "gfs2-glock");
-   if (ret) {
+   glock_shrinker = shrinker_alloc(0, "gfs2-glock");
+   if (!glock_shrinker) {
destroy_workqueue(glock_workqueue);
	rhashtable_destroy(&gl_hash_table);
-   return ret;
+   return -ENOMEM;
}
 
+   glock_shrinker->count_objects = gfs2_glock_shrink_count;
+   glock_shrinker->scan_objects = gfs2_glock_shrink_scan;
+   glock_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(glock_shrinker);
+
for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
init_waitqueue_head(glock_wait_table + i);
 
@@ -2487,7 +2489,7 @@ int __init gfs2_glock_init(void)
 
 void gfs2_glock_exit(void)
 {
-   unregister_shrinker(&glock_shrinker);
+   shrinker_free(glock_shrinker);
	rhashtable_destroy(&gl_hash_table);
destroy_workqueue(glock_workqueue);
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 10/48] f2fs: dynamically allocate the f2fs-shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the f2fs-shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/f2fs/super.c | 32 
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index aa1f9a3a8037..9092310582aa 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -83,11 +83,27 @@ void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
 #endif
 
 /* f2fs-wide shrinker description */
-static struct shrinker f2fs_shrinker_info = {
-   .scan_objects = f2fs_shrink_scan,
-   .count_objects = f2fs_shrink_count,
-   .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *f2fs_shrinker_info;
+
+static int __init f2fs_init_shrinker(void)
+{
+   f2fs_shrinker_info = shrinker_alloc(0, "f2fs-shrinker");
+   if (!f2fs_shrinker_info)
+   return -ENOMEM;
+
+   f2fs_shrinker_info->count_objects = f2fs_shrink_count;
+   f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
+   f2fs_shrinker_info->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(f2fs_shrinker_info);
+
+   return 0;
+}
+
+static void f2fs_exit_shrinker(void)
+{
+   shrinker_free(f2fs_shrinker_info);
+}
 
 enum {
Opt_gc_background,
@@ -4940,7 +4956,7 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_sysfs();
if (err)
goto free_garbage_collection_cache;
-   err = register_shrinker(&f2fs_shrinker_info, "f2fs-shrinker");
+   err = f2fs_init_shrinker();
if (err)
goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
@@ -4985,7 +5001,7 @@ static int __init init_f2fs_fs(void)
f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
 free_shrinker:
-   unregister_shrinker(&f2fs_shrinker_info);
+   f2fs_exit_shrinker();
 free_sysfs:
f2fs_exit_sysfs();
 free_garbage_collection_cache:
@@ -5017,7 +5033,7 @@ static void __exit exit_f2fs_fs(void)
f2fs_destroy_post_read_processing();
f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
-   unregister_shrinker(&f2fs_shrinker_info);
+   f2fs_exit_shrinker();
f2fs_exit_sysfs();
f2fs_destroy_garbage_collection_cache();
f2fs_destroy_extent_cache();
-- 
2.30.2



[Cluster-devel] [PATCH v4 09/48] erofs: dynamically allocate the erofs-shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the erofs-shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 fs/erofs/utils.c | 20 +---
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index cc6fb9e98899..6e1a828e6ca3 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -270,19 +270,25 @@ static unsigned long erofs_shrink_scan(struct shrinker *shrink,
return freed;
 }
 
-static struct shrinker erofs_shrinker_info = {
-   .scan_objects = erofs_shrink_scan,
-   .count_objects = erofs_shrink_count,
-   .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *erofs_shrinker_info;
 
 int __init erofs_init_shrinker(void)
 {
-   return register_shrinker(&erofs_shrinker_info, "erofs-shrinker");
+   erofs_shrinker_info = shrinker_alloc(0, "erofs-shrinker");
+   if (!erofs_shrinker_info)
+   return -ENOMEM;
+
+   erofs_shrinker_info->count_objects = erofs_shrink_count;
+   erofs_shrinker_info->scan_objects = erofs_shrink_scan;
+   erofs_shrinker_info->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(erofs_shrinker_info);
+
+   return 0;
 }
 
 void erofs_exit_shrinker(void)
 {
-   unregister_shrinker(&erofs_shrinker_info);
+   shrinker_free(erofs_shrinker_info);
 }
 #endif /* !CONFIG_EROFS_FS_ZIP */
-- 
2.30.2



[Cluster-devel] [PATCH v4 08/48] xenbus/backend: dynamically allocate the xen-backend shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the xen-backend shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/xen/xenbus/xenbus_probe_backend.c | 18 +++---
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c 
b/drivers/xen/xenbus/xenbus_probe_backend.c
index da96c260e26b..929c41a5ccee 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -284,13 +284,9 @@ static unsigned long backend_shrink_memory_count(struct shrinker *shrinker,
return 0;
 }
 
-static struct shrinker backend_memory_shrinker = {
-   .count_objects = backend_shrink_memory_count,
-   .seeks = DEFAULT_SEEKS,
-};
-
 static int __init xenbus_probe_backend_init(void)
 {
+   struct shrinker *backend_memory_shrinker;
static struct notifier_block xenstore_notifier = {
.notifier_call = backend_probe_and_watch
};
@@ -305,8 +301,16 @@ static int __init xenbus_probe_backend_init(void)
 
	register_xenstore_notifier(&xenstore_notifier);

-   if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
-   pr_warn("shrinker registration failed\n");
+   backend_memory_shrinker = shrinker_alloc(0, "xen-backend");
+   if (!backend_memory_shrinker) {
+   pr_warn("shrinker allocation failed\n");
+   return 0;
+   }
+
+   backend_memory_shrinker->count_objects = backend_shrink_memory_count;
+   backend_memory_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(backend_memory_shrinker);
 
return 0;
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 07/48] drm/ttm: dynamically allocate the drm-ttm_pool shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the drm-ttm_pool shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 drivers/gpu/drm/ttm/ttm_pool.c | 23 +++
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index cddb9151d20f..c9c9618c0dce 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -73,7 +73,7 @@ static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
-static struct shrinker mm_shrinker;
+static struct shrinker *mm_shrinker;
 
 /* Allocate pages of size 1 << order with the given gfp_flags */
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
@@ -734,8 +734,8 @@ static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
struct shrink_control sc = { .gfp_mask = GFP_NOFS };
 
fs_reclaim_acquire(GFP_KERNEL);
-   seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
-		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
+   seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
+		   ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
 
return 0;
@@ -779,10 +779,17 @@ int ttm_pool_mgr_init(unsigned long num_pages)
			    &ttm_pool_debugfs_shrink_fops);
 #endif
 
-   mm_shrinker.count_objects = ttm_pool_shrinker_count;
-   mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
-   mm_shrinker.seeks = 1;
-   return register_shrinker(&mm_shrinker, "drm-ttm_pool");
+   mm_shrinker = shrinker_alloc(0, "drm-ttm_pool");
+   if (!mm_shrinker)
+   return -ENOMEM;
+
+   mm_shrinker->count_objects = ttm_pool_shrinker_count;
+   mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+   mm_shrinker->seeks = 1;
+
+   shrinker_register(mm_shrinker);
+
+   return 0;
 }
 
 /**
@@ -802,6 +809,6 @@ void ttm_pool_mgr_fini(void)
	ttm_pool_type_fini(&global_dma32_uncached[i]);
}
 
-   unregister_shrinker(&mm_shrinker);
+   shrinker_free(mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
 }
-- 
2.30.2



[Cluster-devel] [PATCH v4 06/48] binder: dynamically allocate the android-binder shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the android-binder shrinker.

Signed-off-by: Qi Zheng 
---
 drivers/android/binder_alloc.c | 31 +++
 1 file changed, 19 insertions(+), 12 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e3db8297095a..62675cedd38e 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1053,11 +1053,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
NULL, sc->nr_to_scan);
 }
 
-static struct shrinker binder_shrinker = {
-   .count_objects = binder_shrink_count,
-   .scan_objects = binder_shrink_scan,
-   .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *binder_shrinker;
 
 /**
  * binder_alloc_init() - called by binder_open() for per-proc initialization
@@ -1077,19 +1073,30 @@ void binder_alloc_init(struct binder_alloc *alloc)
 
 int binder_alloc_shrinker_init(void)
 {
-   int ret = list_lru_init(&binder_alloc_lru);
+   int ret;
 
-   if (ret == 0) {
-	   ret = register_shrinker(&binder_shrinker, "android-binder");
-	   if (ret)
-		   list_lru_destroy(&binder_alloc_lru);
+   ret = list_lru_init(&binder_alloc_lru);
+   if (ret)
+   return ret;
+
+   binder_shrinker = shrinker_alloc(0, "android-binder");
+   if (!binder_shrinker) {
+	   list_lru_destroy(&binder_alloc_lru);
+   return -ENOMEM;
}
-   return ret;
+
+   binder_shrinker->count_objects = binder_shrink_count;
+   binder_shrinker->scan_objects = binder_shrink_scan;
+   binder_shrinker->seeks = DEFAULT_SEEKS;
+
+   shrinker_register(binder_shrinker);
+
+   return 0;
 }
 
 void binder_alloc_shrinker_exit(void)
 {
-   unregister_shrinker(&binder_shrinker);
+   shrinker_free(binder_shrinker);
	list_lru_destroy(&binder_alloc_lru);
 }
 
-- 
2.30.2



[Cluster-devel] [PATCH v4 05/48] kvm: mmu: dynamically allocate the x86-mmu shrinker

2023-08-07 Thread Qi Zheng
Use new APIs to dynamically allocate the x86-mmu shrinker.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 arch/x86/kvm/mmu/mmu.c | 18 ++
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 9e4cd8b4a202..0386d5ec97b0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6804,11 +6804,7 @@ static unsigned long mmu_shrink_count(struct shrinker *shrink,
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
-static struct shrinker mmu_shrinker = {
-   .count_objects = mmu_shrink_count,
-   .scan_objects = mmu_shrink_scan,
-   .seeks = DEFAULT_SEEKS * 10,
-};
+static struct shrinker *mmu_shrinker;
 
 static void mmu_destroy_caches(void)
 {
@@ -6941,10 +6937,16 @@ int kvm_mmu_vendor_module_init(void)
	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto out;
 
-   ret = register_shrinker(&mmu_shrinker, "x86-mmu");
-   if (ret)
+   mmu_shrinker = shrinker_alloc(0, "x86-mmu");
+   if (!mmu_shrinker)
goto out_shrinker;
 
+   mmu_shrinker->count_objects = mmu_shrink_count;
+   mmu_shrinker->scan_objects = mmu_shrink_scan;
+   mmu_shrinker->seeks = DEFAULT_SEEKS * 10;
+
+   shrinker_register(mmu_shrinker);
+
return 0;
 
 out_shrinker:
@@ -6966,7 +6968,7 @@ void kvm_mmu_vendor_module_exit(void)
 {
mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-   unregister_shrinker(&mmu_shrinker);
+   shrinker_free(mmu_shrinker);
 }
 
 /*
-- 
2.30.2



[Cluster-devel] [PATCH v4 04/48] mm: shrinker: add infrastructure for dynamically allocating shrinker

2023-08-07 Thread Qi Zheng
Currently, the shrinker instances can be divided into the following three
types:

a) global shrinker instance statically defined in the kernel, such as
   workingset_shadow_shrinker.

b) global shrinker instance statically defined in the kernel modules, such
   as mmu_shrinker in x86.

c) shrinker instance embedded in other structures.

For case a, the memory of shrinker instance is never freed. For case b,
the memory of shrinker instance will be freed after synchronize_rcu() when
the module is unloaded. For case c, the memory of shrinker instance will
be freed along with the structure it is embedded in.
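
For illustration, a case c shrinker looks roughly like this before the
conversion (a minimal sketch; the "foo" cache and its fields are
hypothetical and not part of this patch):

```c
#include <linux/shrinker.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Case c: the shrinker is embedded in the structure that owns it. */
struct foo_cache {
	spinlock_t lock;
	struct list_head objects;
	unsigned long nr_objects;
	struct shrinker shrinker;	/* freed together with foo_cache */
};

static unsigned long foo_shrink_count(struct shrinker *s,
				      struct shrink_control *sc)
{
	/* The owner is recovered via container_of(); the private_data
	 * field introduced below removes the need for this pattern. */
	struct foo_cache *cache = container_of(s, struct foo_cache, shrinker);

	return cache->nr_objects;
}
```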

In preparation for implementing lockless slab shrink, we need to
dynamically allocate the shrinker instances in case c, so that their
memory can then be freed asynchronously by calling kfree_rcu().

So this commit adds the following new APIs for dynamically allocating a
shrinker, and adds a private_data field to struct shrinker so that the
original embedded structure can be recorded and retrieved.

1. shrinker_alloc()

Used to allocate the shrinker instance itself and related memory. It
returns a pointer to the shrinker instance on success and NULL on failure.

2. shrinker_register()

Used to register the shrinker instance, which is the same as the current
register_shrinker_prepared().

3. shrinker_free()

Used to unregister (if needed) and free the shrinker instance.
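
As a minimal sketch of how these fit together (reusing the hypothetical
"foo" cache from above, with the embedded struct shrinker member replaced
by a struct shrinker pointer; foo_shrink_count/foo_shrink_scan are assumed
to exist):

```c
static int foo_cache_shrinker_init(struct foo_cache *cache)
{
	cache->shrinker = shrinker_alloc(0, "foo-cache");
	if (!cache->shrinker)
		return -ENOMEM;

	cache->shrinker->count_objects = foo_shrink_count;
	cache->shrinker->scan_objects = foo_shrink_scan;
	cache->shrinker->seeks = DEFAULT_SEEKS;
	/* Record the owner so callbacks no longer need container_of(). */
	cache->shrinker->private_data = cache;

	shrinker_register(cache->shrinker);
	return 0;
}

static void foo_cache_shrinker_exit(struct foo_cache *cache)
{
	/* Unregisters (if registered) and frees the shrinker instance. */
	shrinker_free(cache->shrinker);
}
```

In the callbacks, the owner is then obtained from shrinker->private_data
rather than via container_of().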

In order to simplify the shrinker-related APIs and make shrinkers more
independent of other kernel mechanisms, subsequent submissions will use
the above APIs to convert all shrinkers (including cases a and b) to
dynamically allocated ones, and then remove all of the existing APIs.

This will also have another advantage mentioned by Dave Chinner:

```
The other advantage of this is that it will break all the existing
out of tree code and third party modules using the old API and will
no longer work with a kernel using lockless slab shrinkers. They
need to break (both at the source and binary levels) to stop bad
things from happening due to using unconverted shrinkers in the new
setup.
```

Signed-off-by: Qi Zheng 
---
 include/linux/shrinker.h |   7 +++
 mm/internal.h            |  11 +
 mm/shrinker.c            | 101 +++
 mm/shrinker_debug.c  |  17 ++-
 4 files changed, 134 insertions(+), 2 deletions(-)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 8dc15aa37410..cc23ff0aee20 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -70,6 +70,8 @@ struct shrinker {
int seeks;  /* seeks to recreate an obj */
unsigned flags;
 
+   void *private_data;
+
/* These are for internal use */
struct list_head list;
 #ifdef CONFIG_MEMCG
@@ -95,6 +97,11 @@ struct shrinker {
  * non-MEMCG_AWARE shrinker should not have this flag set.
  */
 #define SHRINKER_NONSLAB   (1 << 3)
+#define SHRINKER_ALLOCATED (1 << 4)
+
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
 
 extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
const char *fmt, ...);
diff --git a/mm/internal.h b/mm/internal.h
index b98c29f0a471..7b882b903b82 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1152,6 +1152,9 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
+  const char *fmt, va_list ap);
+extern void shrinker_debugfs_name_free(struct shrinker *shrinker);
 extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
  int *debugfs_id);
 extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
@@ -1161,6 +1164,14 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
 {
return 0;
 }
+static inline int shrinker_debugfs_name_alloc(struct shrinker *shrinker,
+ const char *fmt, va_list ap)
+{
+   return 0;
+}
+static inline void shrinker_debugfs_name_free(struct shrinker *shrinker)
+{
+}
 static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
 int *debugfs_id)
 {
diff --git a/mm/shrinker.c b/mm/shrinker.c
index 043c87ccfab4..43a375f954f3 100644
--- a/mm/shrinker.c
+++ b/mm/shrinker.c
@@ -550,6 +550,107 @@ unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
return freed;
 }
 
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...)
+{
+   struct shrinker *shrinker;
+   unsigned int size;
+   va_list ap;
+   int err;
+
+   shrinker = kzalloc(sizeof(struct shrinker), GFP_KERNEL);
+   if (!shrinker)
+   

[Cluster-devel] [PATCH v4 02/48] mm: vmscan: move shrinker-related code into a separate file

2023-08-07 Thread Qi Zheng
The mm/vmscan.c file is too large, so move the shrinker-related code
out of it into a separate file. No functional changes.

Signed-off-by: Qi Zheng 
---
 mm/Makefile   |   4 +-
 mm/internal.h |   2 +
 mm/shrinker.c | 709 ++
 mm/vmscan.c   | 701 -
 4 files changed, 713 insertions(+), 703 deletions(-)
 create mode 100644 mm/shrinker.c

diff --git a/mm/Makefile b/mm/Makefile
index ec65984e2ade..33873c8aedb3 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -48,8 +48,8 @@ endif
 
 obj-y  := filemap.o mempool.o oom_kill.o fadvise.o \
   maccess.o page-writeback.o folio-compat.o \
-  readahead.o swap.o truncate.o vmscan.o shmem.o \
-  util.o mmzone.o vmstat.o backing-dev.o \
+  readahead.o swap.o truncate.o vmscan.o shrinker.o \
+  shmem.o util.o mmzone.o vmstat.o backing-dev.o \
   mm_init.o percpu.o slab_common.o \
   compaction.o show_mem.o shmem_quota.o\
   interval_tree.o list_lru.o workingset.o \
diff --git a/mm/internal.h b/mm/internal.h
index 6f21926393af..b98c29f0a471 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1147,6 +1147,8 @@ struct vma_prepare {
 /*
  * shrinker related functions
  */
+unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg,
+ int priority);
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
diff --git a/mm/shrinker.c b/mm/shrinker.c
new file mode 100644
index ..043c87ccfab4
--- /dev/null
+++ b/mm/shrinker.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/memcontrol.h>
+#include <linux/rwsem.h>
+#include <linux/shrinker.h>
+#include <trace/events/vmscan.h>
+
+#include "internal.h"
+
+LIST_HEAD(shrinker_list);
+DECLARE_RWSEM(shrinker_rwsem);
+
+#ifdef CONFIG_MEMCG
+static int shrinker_nr_max;
+
+/* The shrinker_info is expanded in a batch of BITS_PER_LONG */
+static inline int shrinker_map_size(int nr_items)
+{
+   return (DIV_ROUND_UP(nr_items, BITS_PER_LONG) * sizeof(unsigned long));
+}
+
+static inline int shrinker_defer_size(int nr_items)
+{
+   return (round_up(nr_items, BITS_PER_LONG) * sizeof(atomic_long_t));
+}
+
+void free_shrinker_info(struct mem_cgroup *memcg)
+{
+   struct mem_cgroup_per_node *pn;
+   struct shrinker_info *info;
+   int nid;
+
+   for_each_node(nid) {
+   pn = memcg->nodeinfo[nid];
+   info = rcu_dereference_protected(pn->shrinker_info, true);
+   kvfree(info);
+   rcu_assign_pointer(pn->shrinker_info, NULL);
+   }
+}
+
+int alloc_shrinker_info(struct mem_cgroup *memcg)
+{
+   struct shrinker_info *info;
+   int nid, size, ret = 0;
+   int map_size, defer_size = 0;
+
+   down_write(&shrinker_rwsem);
+   map_size = shrinker_map_size(shrinker_nr_max);
+   defer_size = shrinker_defer_size(shrinker_nr_max);
+   size = map_size + defer_size;
+   for_each_node(nid) {
+   info = kvzalloc_node(sizeof(*info) + size, GFP_KERNEL, nid);
+   if (!info) {
+   free_shrinker_info(memcg);
+   ret = -ENOMEM;
+   break;
+   }
+   info->nr_deferred = (atomic_long_t *)(info + 1);
+   info->map = (void *)info->nr_deferred + defer_size;
+   info->map_nr_max = shrinker_nr_max;
+   rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
+   }
+   up_write(&shrinker_rwsem);
+
+   return ret;
+}
+
+static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
+int nid)
+{
+   return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+					lockdep_is_held(&shrinker_rwsem));
+}
+
+static int expand_one_shrinker_info(struct mem_cgroup *memcg,
+   int map_size, int defer_size,
+   int old_map_size, int old_defer_size,
+   int new_nr_max)
+{
+   struct shrinker_info *new, *old;
+   struct mem_cgroup_per_node *pn;
+   int nid;
+   int size = map_size + defer_size;
+
+   for_each_node(nid) {
+   pn = memcg->nodeinfo[nid];
+   old = shrinker_info_protected(memcg, nid);
+   /* Not yet online memcg */
+   if (!old)
+   return 0;
+
+   /* Already expanded this shrinker_info */
+   if (new_nr_max <= old->map_nr_max)
+   continue;
+
+   new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
+   if (!new)
+   return -ENOMEM;
+
+   new->nr_deferred = (atomic_long_t *)(new + 1);
+   new->map = 

[Cluster-devel] [PATCH v4 03/48] mm: shrinker: remove redundant shrinker_rwsem in debugfs operations

2023-08-07 Thread Qi Zheng
debugfs_remove_recursive() waits for debugfs_file_put() to return, so the
shrinker cannot be freed while a debugfs operation (such as
shrinker_debugfs_count_show() or shrinker_debugfs_scan_write()) is in
progress. There is therefore no need to hold shrinker_rwsem during debugfs
operations.

Signed-off-by: Qi Zheng 
Reviewed-by: Muchun Song 
---
 mm/shrinker_debug.c | 16 +---
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 3ab53fad8876..61702bdc1af4 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -49,17 +49,12 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
struct mem_cgroup *memcg;
unsigned long total;
bool memcg_aware;
-   int ret, nid;
+   int ret = 0, nid;
 
	count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
if (!count_per_node)
return -ENOMEM;
 
-   ret = down_read_killable(&shrinker_rwsem);
-   if (ret) {
-   kfree(count_per_node);
-   return ret;
-   }
rcu_read_lock();
 
memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE;
@@ -92,7 +87,6 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
 
rcu_read_unlock();
-   up_read(&shrinker_rwsem);
 
kfree(count_per_node);
return ret;
@@ -117,7 +111,6 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
struct mem_cgroup *memcg = NULL;
int nid;
char kbuf[72];
-   ssize_t ret;
 
read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, read_len))
@@ -146,12 +139,6 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
return -EINVAL;
}
 
-   ret = down_read_killable(&shrinker_rwsem);
-   if (ret) {
-   mem_cgroup_put(memcg);
-   return ret;
-   }
-
sc.nid = nid;
sc.memcg = memcg;
sc.nr_to_scan = nr_to_scan;
@@ -159,7 +146,6 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
 
	shrinker->scan_objects(shrinker, &sc);
 
-   up_read(&shrinker_rwsem);
mem_cgroup_put(memcg);
 
return size;
-- 
2.30.2



[Cluster-devel] [PATCH v4 01/48] mm: move some shrinker-related function declarations to mm/internal.h

2023-08-07 Thread Qi Zheng
The following functions are only used inside the mm subsystem, so it's
better to move their declarations to the mm/internal.h file.

1. shrinker_debugfs_add()
2. shrinker_debugfs_detach()
3. shrinker_debugfs_remove()

Signed-off-by: Qi Zheng 
---
 include/linux/shrinker.h | 19 ---
 mm/internal.h            | 28 
 2 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 224293b2dd06..8dc15aa37410 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -106,28 +106,9 @@ extern void free_prealloced_shrinker(struct shrinker *shrinker);
 extern void synchronize_shrinkers(void);
 
 #ifdef CONFIG_SHRINKER_DEBUG
-extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
- int *debugfs_id);
-extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
-   int debugfs_id);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
  const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
-static inline int shrinker_debugfs_add(struct shrinker *shrinker)
-{
-   return 0;
-}
-static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
-int *debugfs_id)
-{
-   *debugfs_id = -1;
-   return NULL;
-}
-static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
-  int debugfs_id)
-{
-}
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 {
diff --git a/mm/internal.h b/mm/internal.h
index 154da4f0d557..6f21926393af 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1143,4 +1143,32 @@ struct vma_prepare {
struct vm_area_struct *remove;
struct vm_area_struct *remove2;
 };
+
+/*
+ * shrinker related functions
+ */
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int shrinker_debugfs_add(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+ int *debugfs_id);
+extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+   int debugfs_id);
+#else /* CONFIG_SHRINKER_DEBUG */
+static inline int shrinker_debugfs_add(struct shrinker *shrinker)
+{
+   return 0;
+}
+static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+int *debugfs_id)
+{
+   *debugfs_id = -1;
+   return NULL;
+}
+static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+  int debugfs_id)
+{
+}
+#endif /* CONFIG_SHRINKER_DEBUG */
+
 #endif /* __MM_INTERNAL_H */
-- 
2.30.2



[Cluster-devel] [PATCH v4 00/48] use refcount+RCU method to implement lockless slab shrink

2023-08-07 Thread Qi Zheng
   8.21%  [kernel]   [k] idr_find
   2.71%  [kernel]   [k] _find_next_bit
   1.36%  [kernel]   [k] shrink_node
   0.81%  [kernel]   [k] shrink_lruvec
   0.80%  [kernel]   [k] __radix_tree_lookup
   0.50%  [kernel]   [k] do_shrink_slab
   0.21%  [kernel]   [k] list_lru_count_one
   0.16%  [kernel]   [k] mem_cgroup_iter

2) After applying this patchset:

  60.17%  [kernel]   [k] shrink_slab
  20.42%  [kernel]   [k] pv_native_safe_halt
   3.03%  [kernel]   [k] do_shrink_slab
   2.73%  [kernel]   [k] shrink_node
   2.27%  [kernel]   [k] shrink_lruvec
   2.00%  [kernel]   [k] __rcu_read_unlock
   1.92%  [kernel]   [k] mem_cgroup_iter
   0.98%  [kernel]   [k] __rcu_read_lock
   0.91%  [kernel]   [k] osq_lock
   0.63%  [kernel]   [k] mem_cgroup_calculate_protection
   0.55%  [kernel]   [k] shrinker_put
   0.46%  [kernel]   [k] list_lru_count_one

We can see that the first perf hotspot becomes shrink_slab, which is what we
expect.

3.2 registration and unregistration stress test
-----------------------------------------------

Run the command below to test:

stress-ng --timeout 60 --times --verify --metrics-brief --ramfs 9 &

1) Before applying this patchset:

setting to a 60 second run per stressor
dispatching hogs: 9 ramfs
stressor       bogo ops real time  usr time  sys time   bogo ops/s   bogo ops/s
                          (secs)    (secs)    (secs)   (real time) (usr+sys time)
ramfs            735238     60.00     12.37    363.70     12253.05      1955.08
for a 60.01s run time:
   1440.27s available CPU time
 12.36s user time   (  0.86%)
363.70s system time ( 25.25%)
376.06s total time  ( 26.11%)
load average: 10.79 4.47 1.69
passed: 9: ramfs (9)
failed: 0
skipped: 0
successful run completed in 60.01s (1 min, 0.01 secs)

2) After applying this patchset:

setting to a 60 second run per stressor
dispatching hogs: 9 ramfs
stressor       bogo ops real time  usr time  sys time   bogo ops/s   bogo ops/s
                          (secs)    (secs)    (secs)   (real time) (usr+sys time)
ramfs            746698     60.00     12.45    376.16     12444.02      1921.47
for a 60.01s run time:
   1440.28s available CPU time
 12.44s user time   (  0.86%)
376.16s system time ( 26.12%)
388.60s total time  ( 26.98%)
load average: 9.01 3.85 1.49
passed: 9: ramfs (9)
failed: 0
skipped: 0
successful run completed in 60.01s (1 min, 0.01 secs)

We can see that the ops/s has hardly changed.

This series is based on next-20230807.

Comments and suggestions are welcome.

Thanks,
Qi

Changelog in v3 -> v4:
 - [PATCH v3 01/49] has been merged, so discard it.
 - fix wrong return value in patch v3 15\16\22\27\28\29\34\40.
   (pointed out by Damien Le Moal)
 - fix uninitialized variable in [PATCH v3 04/49]
   (pointed out by Simon Horman)
 - fix typo in [PATCH v3 05/49] (pointed out by Simon Horman)
 - rebase onto the next-20230807.

Changelog in v2 -> v3:
 - add the patch that [PATCH v3 07/49] depends on
 - move some shrinker-related function declarations to mm/internal.h
   (suggested by Muchun Song)
 - combine shrinker_free_non_registered() and shrinker_unregister() into
   shrinker_free() (suggested by Dave Chinner)
 - add missing __init and fix return value in bch_btree_cache_alloc()
   (pointed out by Muchun Song)
 - remove unnecessary WARN_ON() (pointed out by Steven Price)
 - go back to using completion to implement lockless slab shrink
   (pointed out by Dave Chinner)
 - collect Acked-bys and Reviewed-bys
 - rebase onto the next-20230726.

Changelog in v1 -> v2:
 - implement the new APIs and convert all shrinkers to use them.
   (suggested by Dave Chinner)
 - fix UAF in PATCH [05/29] (pointed out by Steven Price)
 - add a secondary array for shrinker_info::{map, nr_deferred}
 - re-implement the lockless slab shrink
   (since unifying the processing of global and memcg slab shrink needs to
modify the startup sequence, as I mentioned in
https://lore.kernel.org/lkml/38b14080-4ce5-d300-8a0a-c630bca68...@bytedance.com/,
I finally chose to process them separately)
 - collect Acked-bys

Qi Zheng (48):
  mm: move some shrinker-related function declarations to mm/internal.h
  mm: vmscan: move shrinker-related code into a separate file
  mm: shrinker: remove redundant shrinker_rwsem in debugfs operations
  mm: shrinker: add infrastructure for dynamically allocating shrinker
  kvm: mmu: dynamically allocate the x86-mmu shrinker
  binder: dynamically allocate the android-binder shrinker
  drm/ttm: dynamically allocate the drm-ttm_pool shrinker
  xenbus/backend: dynamically allocate the xen-backend shrinker
  erofs: dynamically allocate the erofs-shrinker
  f2fs: dynamically allocate the f2fs-shrinker
  gfs2: dynamically allocate the gfs2-glock shrinker
  gfs2: dynamically allocate the gfs2-qd shrinker
  NFSv4.2: dynamically allocate the nfs-xattr

[Cluster-devel] [syzbot] [gfs2?] general protection fault in gfs2_lookup_simple

2023-08-07 Thread syzbot
Hello,

syzbot found the following issue on:

HEAD commit:a73466257270 Add linux-next specific files for 20230801
git tree:   linux-next
console+strace: https://syzkaller.appspot.com/x/log.txt?x=17a48e75a8
kernel config:  https://syzkaller.appspot.com/x/.config?x=8b55cb25bac8948c
dashboard link: https://syzkaller.appspot.com/bug?extid=57e590d90f42e6e925df
compiler:   gcc (Debian 12.2.0-14) 12.2.0, GNU ld (GNU Binutils for Debian) 
2.40
syz repro:  https://syzkaller.appspot.com/x/repro.syz?x=1263b929a8
C reproducer:   https://syzkaller.appspot.com/x/repro.c?x=160bbe31a8

Downloadable assets:
disk image: 
https://storage.googleapis.com/syzbot-assets/d893efe5006c/disk-a7346625.raw.xz
vmlinux: 
https://storage.googleapis.com/syzbot-assets/5a2ea2e3ba30/vmlinux-a7346625.xz
kernel image: 
https://storage.googleapis.com/syzbot-assets/66f8ff91348f/bzImage-a7346625.xz
mounted in repro: 
https://storage.googleapis.com/syzbot-assets/e94e695a9f21/mount_0.gz

The issue was bisected to:

commit 8f18190e31734e434a650d3435da072f03fe485f
Author: Andreas Gruenbacher 
Date:   Wed Jul 26 21:17:53 2023 +

gfs2: Use mapping->gfp_mask for metadata inodes

bisection log:  https://syzkaller.appspot.com/x/bisect.txt?x=1338d136a8
final oops: https://syzkaller.appspot.com/x/report.txt?x=10b8d136a8
console output: https://syzkaller.appspot.com/x/log.txt?x=1738d136a8

IMPORTANT: if you fix the issue, please add the following tag to the commit:
Reported-by: syzbot+57e590d90f42e6e92...@syzkaller.appspotmail.com
Fixes: 8f18190e3173 ("gfs2: Use mapping->gfp_mask for metadata inodes")

gfs2: fsid=no�Šar?d: Trying to join cluster "lock_nolock", "no�Šar?d"
gfs2: fsid=no�Šar?d: Now mounting FS (format 1801)...
syz-executor418: attempt to access beyond end of device
loop0: rw=12288, sector=131072, nr_sectors = 8 limit=32768
general protection fault, probably for non-canonical address 0xdffffc0000000005: 0000 [#1] PREEMPT SMP KASAN
KASAN: null-ptr-deref in range [0x0000000000000028-0x000000000000002f]
CPU: 1 PID: 5032 Comm: syz-executor418 Not tainted 
6.5.0-rc4-next-20230801-syzkaller #0
Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 
07/12/2023
RIP: 0010:gfs2_lookup_simple+0xc6/0x160 fs/gfs2/inode.c:286
Code: 74 24 20 f7 d0 89 44 24 20 e8 66 d3 ff ff 48 85 c0 0f 84 85 00 00 00 48 
89 c3 e8 e5 01 e3 fd 48 8d 7b 30 48 89 f8 48 c1 e8 03 <42> 80 3c 20 00 75 7b 48 
b8 00 00 00 00 00 fc ff df 4c 8b 63 30 49
RSP: 0018:c900039ef848 EFLAGS: 00010206
RAX: 0005 RBX: fffb RCX: 
RDX: 888015bf8000 RSI: 83a38d4b RDI: 002b
RBP: 19200073df09 R08: 0005 R09: 
R10: 0001 R11: 0001 R12: dc00
R13: 8ab99700 R14: 888019f94000 R15: 8880783f06b8
FS:  558fa380() GS:8880b990() knlGS:
CS:  0010 DS:  ES:  CR0: 80050033
CR2: 557976244798 CR3: 74978000 CR4: 003506e0
DR0:  DR1:  DR2: 
DR3:  DR6: fffe0ff0 DR7: 0400
Call Trace:
 
 init_journal fs/gfs2/ops_fstype.c:742 [inline]
 init_inodes+0x495/0x2e30 fs/gfs2/ops_fstype.c:885
 gfs2_fill_super+0x1a9e/0x2b10 fs/gfs2/ops_fstype.c:1248
 get_tree_bdev+0x390/0x6a0 fs/super.c:1345
 gfs2_get_tree+0x4e/0x280 fs/gfs2/ops_fstype.c:1333
 vfs_get_tree+0x88/0x350 fs/super.c:1521
 do_new_mount fs/namespace.c:3335 [inline]
 path_mount+0x1492/0x1ed0 fs/namespace.c:3662
 do_mount fs/namespace.c:3675 [inline]
 __do_sys_mount fs/namespace.c:3884 [inline]
 __se_sys_mount fs/namespace.c:3861 [inline]
 __x64_sys_mount+0x293/0x310 fs/namespace.c:3861
 do_syscall_x64 arch/x86/entry/common.c:50 [inline]
 do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80
 entry_SYSCALL_64_after_hwframe+0x63/0xcd
RIP: 0033:0x7f6d772a2c3a
Code: d8 64 89 02 48 c7 c0 ff ff ff ff eb a6 e8 5e 04 00 00 66 2e 0f 1f 84 00 
00 00 00 00 0f 1f 40 00 49 89 ca b8 a5 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 
c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
RSP: 002b:7fff3af18918 EFLAGS: 0282 ORIG_RAX: 00a5
RAX: ffda RBX: 7fff3af18920 RCX: 7f6d772a2c3a
RDX: 2000 RSI: 2040 RDI: 7fff3af18920
RBP: 0004 R08: 7fff3af18960 R09: 000125fe
R10: 0819 R11: 0282 R12: 7fff3af18960
R13: 0003 R14: 0100 R15: 0001
 
Modules linked in:
---[ end trace  ]---
RIP: 0010:gfs2_lookup_simple+0xc6/0x160 fs/gfs2/inode.c:286
Code: 74 24 20 f7 d0 89 44 24 20 e8 66 d3 ff ff 48 85 c0 0f 84 85 00 00 00 48 
89 c3 e8 e5 01 e3 fd 48 8d 7b 30 48 89 f8 48 c1 e8 03 <42> 80 3c 20 00 75 7b 48 
b8 00 00 00 00 00 fc ff df 4c 8b 63 30 49
RSP: 0018:c900039ef848 EFLAGS: 00010206
RAX: 0005 RBX: fffb RCX: 
RDX: 888015bf8000 RSI: