Previously, the @ondemand_id field was used not only to identify the
ondemand state of the object, but also as the index into the xarray.
This commit introduces a dedicated @state field to decouple the state
tracking from @ondemand_id, and adds helpers to access it.
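
For reference, CACHEFILES_OBJECT_STATE_FUNCS(open) expands to roughly the
following pair of helpers (an illustrative sketch of the expansion only;
the actual definition is the macro in the diff below):

  static inline bool
  cachefiles_ondemand_object_is_open(const struct cachefiles_object *object)
  {
          /* Paired with the smp_store_release() in the setter below. */
          return smp_load_acquire(&object->state) ==
                  CACHEFILES_ONDEMAND_OBJSTATE_open;
  }

  static inline void
  cachefiles_ondemand_set_object_open(struct cachefiles_object *object)
  {
          /* Publish the new state so that concurrent readers observe it. */
          smp_store_release(&object->state, CACHEFILES_ONDEMAND_OBJSTATE_open);
  }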

Signed-off-by: Jia Zhu <zhujia...@bytedance.com>
Reviewed-by: Xin Yin <yinxi...@bytedance.com>
---
 fs/cachefiles/internal.h | 33 +++++++++++++++++++++++++++++++++
 fs/cachefiles/ondemand.c | 15 +++++++++------
 2 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 6cba2c6de2f9..6661b3e361da 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -17,6 +17,7 @@
 #include <linux/security.h>
 #include <linux/xarray.h>
 #include <linux/cachefiles.h>
+#include <linux/atomic.h>
 
 #define CACHEFILES_DIO_BLOCK_SIZE 4096
 
@@ -44,6 +45,11 @@ struct cachefiles_volume {
        struct dentry                   *fanout[256];   /* Fanout subdirs */
 };
 
+enum cachefiles_object_state {
+       CACHEFILES_ONDEMAND_OBJSTATE_close, /* Anonymous fd closed by daemon or initial state */
+       CACHEFILES_ONDEMAND_OBJSTATE_open, /* Anonymous fd associated with object is available */
+};
+
 /*
  * Backing file state.
  */
@@ -62,6 +68,7 @@ struct cachefiles_object {
 #define CACHEFILES_OBJECT_USING_TMPFILE        0               /* Have an unlinked tmpfile */
 #ifdef CONFIG_CACHEFILES_ONDEMAND
        int                             ondemand_id;
+       enum cachefiles_object_state    state;
 #endif
 };
 
@@ -295,6 +302,32 @@ extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object);
 extern int cachefiles_ondemand_read(struct cachefiles_object *object,
                                    loff_t pos, size_t len);
 
+#define CACHEFILES_OBJECT_STATE_FUNCS(_state)                                  \
+static inline bool                                                             \
+cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \
+{                                                                              \
+       /*
+        * Pairs with smp_store_release() in set_object_##_state()
+        * I.e. another task can publish state concurrently, by executing
+        * a RELEASE barrier. We need to use smp_load_acquire() here
+        * to safely ACQUIRE the memory the other task published.
+        */                                                                     \
+       return smp_load_acquire(&object->state) == CACHEFILES_ONDEMAND_OBJSTATE_##_state; \
+}                                                                              \
+                                                                               \
+static inline void                                                             \
+cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object)      \
+{                                                                              \
+       /*
+        * Pairs with smp_load_acquire() in object_is_##_state()
+        * I.e. here we publish a state with a RELEASE barrier
+        * so that concurrent tasks can ACQUIRE it.
+        */                                                                     \
+       smp_store_release(&object->state, CACHEFILES_ONDEMAND_OBJSTATE_##_state); \
+}
+
+CACHEFILES_OBJECT_STATE_FUNCS(open);
+CACHEFILES_OBJECT_STATE_FUNCS(close);
 #else
 static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
                                        char __user *_buffer, size_t buflen)
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 1fee702d5529..e3155a5f32e4 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -15,6 +15,7 @@ static int cachefiles_ondemand_fd_release(struct inode *inode,
 
        xa_lock(&cache->reqs);
        object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
+       cachefiles_ondemand_set_object_close(object);
 
        /*
         * Flush all pending READ requests since their completion depends on
@@ -172,6 +173,8 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
                set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
        trace_cachefiles_ondemand_copen(req->object, id, size);
 
+       cachefiles_ondemand_set_object_open(req->object);
+
 out:
        complete(&req->done);
        return ret;
@@ -353,7 +356,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
                /* coupled with the barrier in cachefiles_flush_reqs() */
                smp_mb();
 
-               if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) {
+               if (opcode != CACHEFILES_OP_OPEN &&
+                       !cachefiles_ondemand_object_is_open(object)) {
                        WARN_ON_ONCE(object->ondemand_id == 0);
                        xas_unlock(&xas);
                        ret = -EIO;
@@ -420,7 +424,6 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
                                              void *private)
 {
        struct cachefiles_object *object = req->object;
-       int object_id = object->ondemand_id;
 
        /*
         * It's possible that object id is still 0 if the cookie looking up
@@ -428,10 +431,10 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req,
         * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means
         * anon_fd has already been closed.
         */
-       if (object_id <= 0)
+       if (!cachefiles_ondemand_object_is_open(object))
                return -ENOENT;
 
-       req->msg.object_id = object_id;
+       req->msg.object_id = object->ondemand_id;
        trace_cachefiles_ondemand_close(object, &req->msg);
        return 0;
 }
@@ -450,7 +453,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req,
        int object_id = object->ondemand_id;
 
        /* Stop enqueuing requests when daemon has closed anon_fd. */
-       if (object_id <= 0) {
+       if (!cachefiles_ondemand_object_is_open(object)) {
                WARN_ON_ONCE(object_id == 0);
                pr_info_once("READ: anonymous fd closed prematurely.\n");
                return -EIO;
@@ -475,7 +478,7 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
         * creating a new tmpfile as the cache file. Reuse the previously
         * allocated object ID if any.
         */
-       if (object->ondemand_id > 0)
+       if (cachefiles_ondemand_object_is_open(object))
                return 0;
 
        volume_key_size = volume->key[0] + 1;
-- 
2.20.1
