On Thu, Mar 03, 2016 at 11:46:00PM +0900, Sergey Senozhatsky wrote:
> Do not register shrinker compaction callbacks anymore, since
> now we schedule class compaction work each time its fragmentation
> value goes above the watermark.

I suggested removing the shrinker compaction, but while reviewing your
first patch in this thread, I thought we need an upper bound on
zspage compaction so the background work can easily bail out for
latency reasons. IOW, the work could give up the job. In that case, we
might need a fall-back scheme to continue the job, and I think that
could be the shrinker.

What do you think?

> 
> Signed-off-by: Sergey Senozhatsky <sergey.senozhat...@gmail.com>
> ---
>  mm/zsmalloc.c | 72 
> -----------------------------------------------------------
>  1 file changed, 72 deletions(-)
> 
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index a4ef7e7..0bb060f 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -256,13 +256,6 @@ struct zs_pool {
>  
>       struct zs_pool_stats stats;
>  
> -     /* Compact classes */
> -     struct shrinker shrinker;
> -     /*
> -      * To signify that register_shrinker() was successful
> -      * and unregister_shrinker() will not Oops.
> -      */
> -     bool shrinker_enabled;
>  #ifdef CONFIG_ZSMALLOC_STAT
>       struct dentry *stat_dentry;
>  #endif
> @@ -1848,64 +1841,6 @@ void zs_pool_stats(struct zs_pool *pool, struct 
> zs_pool_stats *stats)
>  }
>  EXPORT_SYMBOL_GPL(zs_pool_stats);
>  
> -static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
> -             struct shrink_control *sc)
> -{
> -     unsigned long pages_freed;
> -     struct zs_pool *pool = container_of(shrinker, struct zs_pool,
> -                     shrinker);
> -
> -     pages_freed = pool->stats.pages_compacted;
> -     /*
> -      * Compact classes and calculate compaction delta.
> -      * Can run concurrently with a manually triggered
> -      * (by user) compaction.
> -      */
> -     pages_freed = zs_compact(pool) - pages_freed;
> -
> -     return pages_freed ? pages_freed : SHRINK_STOP;
> -}
> -
> -static unsigned long zs_shrinker_count(struct shrinker *shrinker,
> -             struct shrink_control *sc)
> -{
> -     int i;
> -     struct size_class *class;
> -     unsigned long pages_to_free = 0;
> -     struct zs_pool *pool = container_of(shrinker, struct zs_pool,
> -                     shrinker);
> -
> -     for (i = zs_size_classes - 1; i >= 0; i--) {
> -             class = pool->size_class[i];
> -             if (!class)
> -                     continue;
> -             if (class->index != i)
> -                     continue;
> -
> -             pages_to_free += zs_can_compact(class);
> -     }
> -
> -     return pages_to_free;
> -}
> -
> -static void zs_unregister_shrinker(struct zs_pool *pool)
> -{
> -     if (pool->shrinker_enabled) {
> -             unregister_shrinker(&pool->shrinker);
> -             pool->shrinker_enabled = false;
> -     }
> -}
> -
> -static int zs_register_shrinker(struct zs_pool *pool)
> -{
> -     pool->shrinker.scan_objects = zs_shrinker_scan;
> -     pool->shrinker.count_objects = zs_shrinker_count;
> -     pool->shrinker.batch = 0;
> -     pool->shrinker.seeks = DEFAULT_SEEKS;
> -
> -     return register_shrinker(&pool->shrinker);
> -}
> -
>  /**
>   * zs_create_pool - Creates an allocation pool to work from.
>   * @flags: allocation flags used to allocate pool metadata
> @@ -1994,12 +1929,6 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t 
> flags)
>       if (zs_pool_stat_create(name, pool))
>               goto err;
>  
> -     /*
> -      * Not critical, we still can use the pool
> -      * and user can trigger compaction manually.
> -      */
> -     if (zs_register_shrinker(pool) == 0)
> -             pool->shrinker_enabled = true;
>       return pool;
>  
>  err:
> @@ -2012,7 +1941,6 @@ void zs_destroy_pool(struct zs_pool *pool)
>  {
>       int i;
>  
> -     zs_unregister_shrinker(pool);
>       zs_pool_stat_destroy(pool);
>  
>       for (i = 0; i < zs_size_classes; i++) {
> -- 
> 2.8.0.rc0
> 

Reply via email to