Re: [PATCH 16/22] staging: erofs: tidy up decompression frontend

2019-07-31 Thread Chao Yu
On 2019/7/29 14:51, Gao Xiang wrote:
> Although this patch contains a large amount of changes, it is hard
> to separate it into smaller patches.
> 
> Most changes are due to structure renaming to make the code easier
> to understand and more straightforward:
>  z_erofs_vle_workgroup to z_erofs_pcluster
>  since it represents a physical cluster;
>  z_erofs_vle_work to z_erofs_collection
>  since it represents a collection of logical pages;
>  z_erofs_vle_work_builder to z_erofs_collector
>  since it's used to fill z_erofs_{pcluster,collection}.
> 
> struct z_erofs_vle_work_finder has no extra use compared with
> struct z_erofs_collector, so delete it.
> 
> The FULL_LENGTH bit is integrated into .length of the pcluster so
> that it can be updated atomically together with the corresponding
> length change.
> 
> As a minor change, add comments for better description.
> 
> Signed-off-by: Gao Xiang 

I hope I didn't miss anything, since this is such a huge cleanup...

Reviewed-by: Chao Yu 

Thanks,


[PATCH 16/22] staging: erofs: tidy up decompression frontend

2019-07-29 Thread Gao Xiang
Although this patch contains a large amount of changes, it is hard to
separate it into smaller patches.

Most changes are due to structure renaming to make the code easier to
understand and more straightforward (a rough sketch of the resulting
layout follows this list):
 z_erofs_vle_workgroup to z_erofs_pcluster
 since it represents a physical cluster;
 z_erofs_vle_work to z_erofs_collection
 since it represents a collection of logical pages;
 z_erofs_vle_work_builder to z_erofs_collector
 since it's used to fill z_erofs_{pcluster,collection}.
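
Roughly, the renamed structures relate as below (a trimmed, illustrative
sketch only; fields are abridged and zdata.h in this patch remains the
authoritative definition):

	/* a collection of logical pages for one decompression run */
	struct z_erofs_collection {
		struct mutex lock;
		/* maximum relative page index of this collection */
		unsigned short nr_pages;
		/* number of pages filled into the inline pagevec so far */
		unsigned int vcnt;
	};

	/* a physical cluster, which owns the compressed pages */
	struct z_erofs_pcluster {
		struct erofs_workgroup obj;
		/* the primary collection backed by this pcluster */
		struct z_erofs_collection primary_collection;
		struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
		/* decompressed length, also carrying the FULL_LENGTH bit */
		unsigned int length;
	};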

struct z_erofs_vle_work_finder has no extra use compared with
struct z_erofs_collector, so delete it.

The FULL_LENGTH bit is integrated into .length of the pcluster so that
it can be updated atomically together with the corresponding length
change.
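
The encoding can be sketched as below (illustrative only: the SKETCH_*
names and the exact bit layout are assumptions for this example, not
definitions taken from the patch):

	/* keep a "length is final" flag in one bit of .length, so the
	 * flag and the length are always published by a single atomic
	 * update instead of two separate stores */
	#define SKETCH_PCL_FULL_LENGTH	(1U << 31)

	static void sketch_try_extend_length(struct z_erofs_pcluster *pcl,
					     unsigned int llen, bool full)
	{
		unsigned int orig, val;

		val = llen | (full ? SKETCH_PCL_FULL_LENGTH : 0);
		do {
			orig = READ_ONCE(pcl->length);
			/* never shrink the recorded lower limit */
			if ((orig & ~SKETCH_PCL_FULL_LENGTH) >= llen)
				return;
		} while (cmpxchg(&pcl->length, orig, val) != orig);
	}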

As a minor change, add comments for better description.

Signed-off-by: Gao Xiang 
---
 drivers/staging/erofs/zdata.c | 983 +++---
 drivers/staging/erofs/zdata.h |  96 ++--
 2 files changed, 465 insertions(+), 614 deletions(-)

diff --git a/drivers/staging/erofs/zdata.c b/drivers/staging/erofs/zdata.c
index 29900ca7c9d4..34ee19b4721d 100644
--- a/drivers/staging/erofs/zdata.c
+++ b/drivers/staging/erofs/zdata.c
@@ -18,7 +18,7 @@
  */
 #define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
 
-/* how to allocate cached pages for a workgroup */
+/* how to allocate cached pages for a pcluster */
 enum z_erofs_cache_alloctype {
DONTALLOC,  /* don't allocate any cached pages */
DELAYEDALLOC,   /* delayed allocation (at the time of submitting io) */
@@ -34,156 +34,158 @@ typedef tagptr1_t compressed_page_t;
tagptr_fold(compressed_page_t, page, 1)
 
 static struct workqueue_struct *z_erofs_workqueue __read_mostly;
-static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
+static struct kmem_cache *pcluster_cachep __read_mostly;
 
 void z_erofs_exit_zip_subsystem(void)
 {
destroy_workqueue(z_erofs_workqueue);
-   kmem_cache_destroy(z_erofs_workgroup_cachep);
+   kmem_cache_destroy(pcluster_cachep);
 }
 
 static inline int init_unzip_workqueue(void)
 {
const unsigned int onlinecpus = num_possible_cpus();
+   const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
 
/*
-* we don't need too many threads, limiting threads
-* could improve scheduling performance.
+* no need to spawn too many threads; limiting threads could minimize
+* scheduling overhead, perhaps per-CPU threads would be better?
 */
-   z_erofs_workqueue =
-   alloc_workqueue("erofs_unzipd",
-   WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
-   onlinecpus + onlinecpus / 4);
-
+   z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
+   onlinecpus + onlinecpus / 4);
return z_erofs_workqueue ? 0 : -ENOMEM;
 }
 
 static void init_once(void *ptr)
 {
-   struct z_erofs_vle_workgroup *grp = ptr;
-   struct z_erofs_vle_work *const work =
-   z_erofs_vle_grab_primary_work(grp);
+   struct z_erofs_pcluster *pcl = ptr;
+   struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
unsigned int i;
 
-   mutex_init(&work->lock);
-   work->nr_pages = 0;
-   work->vcnt = 0;
+   mutex_init(&cl->lock);
+   cl->nr_pages = 0;
+   cl->vcnt = 0;
for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
-   grp->compressed_pages[i] = NULL;
+   pcl->compressed_pages[i] = NULL;
 }
 
-static void init_always(struct z_erofs_vle_workgroup *grp)
+static void init_always(struct z_erofs_pcluster *pcl)
 {
-   struct z_erofs_vle_work *const work =
-   z_erofs_vle_grab_primary_work(grp);
+   struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
 
-   atomic_set(&grp->obj.refcount, 1);
-   grp->flags = 0;
+   atomic_set(&pcl->obj.refcount, 1);
 
-   DBG_BUGON(work->nr_pages);
-   DBG_BUGON(work->vcnt);
+   DBG_BUGON(cl->nr_pages);
+   DBG_BUGON(cl->vcnt);
 }
 
 int __init z_erofs_init_zip_subsystem(void)
 {
-   z_erofs_workgroup_cachep =
-   kmem_cache_create("erofs_compress",
- Z_EROFS_WORKGROUP_SIZE, 0,
- SLAB_RECLAIM_ACCOUNT, init_once);
-
-   if (z_erofs_workgroup_cachep) {
+   pcluster_cachep = kmem_cache_create("erofs_compress",
+   Z_EROFS_WORKGROUP_SIZE, 0,
+   SLAB_RECLAIM_ACCOUNT, init_once);
+   if (pcluster_cachep) {
if (!init_unzip_workqueue())
return 0;
 
-   kmem_cache_destroy(z_erofs_workgroup_cachep);
+   kmem_cache_destroy(pcluster_cachep);
}
return -ENOMEM;
 }
 
-enum z_erofs_vle_work_role {
-   Z_EROFS_VLE_WORK_SECONDARY,
-   Z_EROFS_VLE_WORK_PRIMARY,
+enum z_erofs_collectmode {
+   COLLECT_SECONDARY,
+