Make the refcounting of netfs_begin_read() easier to use by not consuming the
caller's ref on the netfs_io_request it is given.  The caller now retains its
ref across the call and drops it itself, which is simpler for callers that
need to look at the request struct after the read has been started.
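
For illustration, a minimal caller-side sketch of the new convention.  The
function name example_sync_read is hypothetical, and the sketch assumes it
sits alongside the converted callers in fs/netfs so the internal netfs
declarations are visible; the netfs calls and the netfs_rreq_trace_put_return
tag are the ones used by this patch:

    /* Hypothetical caller: netfs_begin_read() no longer consumes the ref
     * obtained when the request was set up, so the request can still be
     * examined after the call and the caller drops its ref explicitly.
     */
    static int example_sync_read(struct netfs_io_request *rreq)
    {
            int ret = netfs_begin_read(rreq, true);

            /* rreq is still pinned by our ref, so fields such as
             * rreq->error or rreq->submitted may be consulted here.
             */
            netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
            return ret;
    }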

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
 fs/netfs/buffered_read.c     |  6 +++++-
 fs/netfs/io.c                | 28 +++++++++++++---------------
 include/trace/events/netfs.h |  9 +++++----
 3 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 3b7eb706f2fe..05824f73cfc7 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -217,6 +217,7 @@ void netfs_readahead(struct readahead_control *ractl)
                ;
 
        netfs_begin_read(rreq, false);
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
        return;
 
 cleanup_free:
@@ -267,7 +268,9 @@ int netfs_read_folio(struct file *file, struct folio *folio)
        iov_iter_xarray(&rreq->iter, ITER_DEST, &mapping->i_pages,
                        rreq->start, rreq->len);
 
-       return netfs_begin_read(rreq, true);
+       ret = netfs_begin_read(rreq, true);
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
+       return ret;
 
 discard:
        netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
@@ -436,6 +439,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
        ret = netfs_begin_read(rreq, true);
        if (ret < 0)
                goto error;
+       netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
        ret = folio_wait_fscache_killable(folio);
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index c80b8eed1209..1795f8679be9 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -362,6 +362,7 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 
        netfs_rreq_unlock_folios(rreq);
 
+       trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
        clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
@@ -657,7 +658,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 
        if (rreq->len == 0) {
                pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
                return -EIO;
        }
 
@@ -669,12 +669,10 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 
        INIT_WORK(&rreq->work, netfs_rreq_work);
 
-       if (sync)
-               netfs_get_request(rreq, netfs_rreq_trace_get_hold);
-
        /* Chop the read into slices according to what the cache and the netfs
         * want and submit each one.
         */
+       netfs_get_request(rreq, netfs_rreq_trace_get_for_outstanding);
        atomic_set(&rreq->nr_outstanding, 1);
        io_iter = rreq->io_iter;
        do {
@@ -684,25 +682,25 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
        } while (rreq->submitted < rreq->len);
 
        if (sync) {
-               /* Keep nr_outstanding incremented so that the ref always belongs to
-                * us, and the service code isn't punted off to a random thread pool to
-                * process.
+               /* Keep nr_outstanding incremented so that the ref always
+                * belongs to us, and the service code isn't punted off to a
+                * random thread pool to process.  Note that this might start
+                * further work, such as writing to the cache.
                 */
-               for (;;) {
-                       wait_var_event(&rreq->nr_outstanding,
-                                      atomic_read(&rreq->nr_outstanding) == 1);
+               wait_var_event(&rreq->nr_outstanding,
+                              atomic_read(&rreq->nr_outstanding) == 1);
+               if (atomic_dec_and_test(&rreq->nr_outstanding))
                        netfs_rreq_assess(rreq, false);
-                       if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
-                               break;
-                       cond_resched();
-               }
+
+               trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip);
+               wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS,
+                           TASK_UNINTERRUPTIBLE);
 
                ret = rreq->error;
                if (ret == 0 && rreq->submitted < rreq->len) {
                        trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
                        ret = -EIO;
                }
-               netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
        } else {
                /* If we decrement nr_outstanding to 0, the ref belongs to us. */
                if (atomic_dec_and_test(&rreq->nr_outstanding))
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 4ea4e34d279f..6daadf2aac8a 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -34,7 +34,9 @@
        EM(netfs_rreq_trace_free,               "FREE   ")      \
        EM(netfs_rreq_trace_resubmit,           "RESUBMT")      \
        EM(netfs_rreq_trace_unlock,             "UNLOCK ")      \
-       E_(netfs_rreq_trace_unmark,             "UNMARK ")
+       EM(netfs_rreq_trace_unmark,             "UNMARK ")      \
+       EM(netfs_rreq_trace_wait_ip,            "WAIT-IP")      \
+       E_(netfs_rreq_trace_wake_ip,            "WAKE-IP")
 
 #define netfs_sreq_sources                                     \
        EM(NETFS_FILL_WITH_ZEROES,              "ZERO")         \
@@ -65,14 +67,13 @@
        E_(netfs_fail_prepare_write,            "prep-write")
 
 #define netfs_rreq_ref_traces                                  \
-       EM(netfs_rreq_trace_get_hold,           "GET HOLD   ")  \
+       EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND")  \
        EM(netfs_rreq_trace_get_subreq,         "GET SUBREQ ")  \
        EM(netfs_rreq_trace_put_complete,       "PUT COMPLT ")  \
        EM(netfs_rreq_trace_put_discard,        "PUT DISCARD")  \
        EM(netfs_rreq_trace_put_failed,         "PUT FAILED ")  \
-       EM(netfs_rreq_trace_put_hold,           "PUT HOLD   ")  \
+       EM(netfs_rreq_trace_put_return,         "PUT RETURN ")  \
        EM(netfs_rreq_trace_put_subreq,         "PUT SUBREQ ")  \
-       EM(netfs_rreq_trace_put_zero_len,       "PUT ZEROLEN")  \
        E_(netfs_rreq_trace_new,                "NEW        ")
 
 #define netfs_sreq_ref_traces                                  \