Currently, enqueuing and dequeuing of on-demand requests both start
from index 0, which makes request distribution unfair. Under heavy
concurrent I/O, requests stored at higher indices can starve: for
example, if requests at indices 0 and 5 are both pending and a new
request keeps reclaiming index 0 after each read, a search that always
starts at 0 never reaches index 5.

Searching for requests cyclically in cachefiles_ondemand_daemon_read()
makes the distribution fairer: each search resumes just past the index
of the previously handled request and wraps around to 0 on reaching
the end.
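
For illustration, here is a minimal userspace sketch of the same
two-pass wrap-around search. This is not kernel code: find_marked()
and pick_request() are made-up stand-ins for xas_find_marked() and the
daemon read path, and the bool array stands in for the xarray marks.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  #define NR_SLOTS 8

  static bool marked[NR_SLOTS]; /* slot i "marked" while it holds a new request */
  static size_t next_id;        /* plays the role of cache->req_id_next */

  /* Return the first marked slot in [lo, hi], or NR_SLOTS if none. */
  static size_t find_marked(size_t lo, size_t hi)
  {
          for (size_t i = lo; i <= hi && i < NR_SLOTS; i++)
                  if (marked[i])
                          return i;
          return NR_SLOTS;
  }

  /* Two-pass cyclic search: [next_id, end] first, then wrap to [0, next_id - 1]. */
  static size_t pick_request(void)
  {
          size_t i = find_marked(next_id, NR_SLOTS - 1);

          if (i == NR_SLOTS && next_id > 0)
                  i = find_marked(0, next_id - 1);
          if (i == NR_SLOTS)
                  return i;          /* nothing pending */
          marked[i] = false;         /* clear the mark, as the daemon read does */
          next_id = i + 1;           /* next search starts past this slot */
          return i;
  }

  int main(void)
  {
          marked[1] = marked[6] = true;
          printf("picked %zu\n", pick_request()); /* 1 */
          marked[1] = true;                       /* slot 1 refilled right away */
          printf("picked %zu\n", pick_request()); /* 6 */
          return 0;
  }

With a fixed starting point of 0, the second pick would return 1 again
and slot 6 would starve; with the cyclic search it returns 6.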

Fixes: c8383054506c ("cachefiles: notify the user daemon when looking up cookie")
Reported-by: Yongqing Li <liyongq...@bytedance.com>
Signed-off-by: Xin Yin <yinxi...@bytedance.com>
Reviewed-by: Jeffle Xu <jeffl...@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiang...@linux.alibaba.com>
---
changes from v1:
- add Reviewed-by tags from Jeffle and Gao
- add Fixes tag
---
 fs/cachefiles/internal.h |  1 +
 fs/cachefiles/ondemand.c | 12 +++++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 6cba2c6de2f9..2ad58c465208 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -111,6 +111,7 @@ struct cachefiles_cache {
        char                            *tag;           /* cache binding tag */
        refcount_t                      unbind_pincount;/* refcount to do daemon unbind */
        struct xarray                   reqs;           /* xarray of pending on-demand requests */
+       unsigned long                   req_id_next;    /* starting point for the next request search */
        struct xarray                   ondemand_ids;   /* xarray for ondemand_id allocation */
        u32                             ondemand_id_next;
 };
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 1fee702d5529..247961d65369 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -238,14 +238,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
        unsigned long id = 0;
        size_t n;
        int ret = 0;
-       XA_STATE(xas, &cache->reqs, 0);
+       XA_STATE(xas, &cache->reqs, cache->req_id_next);
 
        /*
-        * Search for a request that has not ever been processed, to prevent
-        * requests from being processed repeatedly.
+        * Cyclically search for a request that has not ever been processed,
+        * to prevent requests from being processed repeatedly, and make
+        * request distribution fair.
         */
        xa_lock(&cache->reqs);
        req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+       if (!req && cache->req_id_next > 0) {
+               xas_set(&xas, 0);
+               req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+       }
        if (!req) {
                xa_unlock(&cache->reqs);
                return 0;
@@ -260,6 +265,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
        }
 
        xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+       cache->req_id_next = xas.xa_index + 1;
        xa_unlock(&cache->reqs);
 
        id = xas.xa_index;
-- 
2.25.1
