On Wed, Mar 10, 2021 at 04:56:13PM +0000, David Howells wrote:
> +void netfs_readahead(struct readahead_control *ractl,
> +                  const struct netfs_read_request_ops *ops,
> +                  void *netfs_priv)
> +{
> +     struct netfs_read_request *rreq;
> +     struct page *page;
> +     unsigned int debug_index = 0;
> +
> +     _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
> +
> +     if (readahead_count(ractl) == 0)
> +             goto cleanup;
> +
> +     rreq = netfs_alloc_read_request(ops, netfs_priv, ractl->file);
> +     if (!rreq)
> +             goto cleanup;
> +     rreq->mapping   = ractl->mapping;
> +     rreq->start     = readahead_pos(ractl);
> +     rreq->len       = readahead_length(ractl);
> +
> +     netfs_rreq_expand(rreq, ractl);
> +
> +     atomic_set(&rreq->nr_rd_ops, 1);
> +     do {
> +             if (!netfs_rreq_submit_slice(rreq, &debug_index))
> +                     break;
> +
> +     } while (rreq->submitted < rreq->len);
> +
> +     while ((page = readahead_page(ractl)))
> +             put_page(page);

You don't need this pair of lines (the readahead_page()/put_page() loop)
unless I'm missing something.  read_pages() in mm/readahead.c puts the
reference and unlocks any pages that are not read by the readahead op.
Indeed, I think doing this is buggy because you don't unlock the page.

> +     /* If we decrement nr_rd_ops to 0, the ref belongs to us. */
> +     if (atomic_dec_and_test(&rreq->nr_rd_ops))
> +             netfs_rreq_assess(rreq, false);
> +     return;
> +
> +cleanup:
> +     if (netfs_priv)
> +             ops->cleanup(ractl->mapping, netfs_priv);
> +     return;
> +}
> +EXPORT_SYMBOL(netfs_readahead);

> +int netfs_readpage(struct file *file,
> +                struct page *page,
> +                const struct netfs_read_request_ops *ops,
> +                void *netfs_priv)
> +{
> +     struct netfs_read_request *rreq;
> +     unsigned int debug_index = 0;
> +     int ret;
> +
> +     _enter("%lx", page->index);
> +
> +     rreq = netfs_alloc_read_request(ops, netfs_priv, file);
> +     if (!rreq) {
> +             if (netfs_priv)
> +                     ops->cleanup(netfs_priv, page->mapping);
> +             unlock_page(page);
> +             return -ENOMEM;
> +     }
> +     rreq->mapping   = page->mapping;

FYI, this isn't going to work with swap-over-NFS.  You have to use
page_file_mapping().

> +     rreq->start     = page->index * PAGE_SIZE;

and page_index() here.

I rather dislike it that swap-over-NFS uses readpage which makes this
need to exist.  If somebody were to switch SWP_FS_OPS to using kiocbs,
some of this pain could go away.

Reply via email to