Provide an entry point to delegate a filesystem's ->page_mkwrite() to.
This checks for conflicting writes, then attaches any netfs-specific
group marking (e.g. a ceph snap context) to the folio that is about to
be dirtied.
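
As an illustration, a filesystem might delegate to this from its VMA
operations roughly as follows (a hypothetical sketch for an imaginary
filesystem "myfs" that doesn't use write grouping and so passes a NULL
group; filemap_fault and filemap_map_pages are the usual generic
handlers):

	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
	{
		/* No netfs write grouping in use, so pass a NULL group. */
		return netfs_page_mkwrite(vmf, NULL);
	}

	static const struct vm_operations_struct myfs_vm_ops = {
		.fault		= filemap_fault,
		.map_pages	= filemap_map_pages,
		.page_mkwrite	= myfs_page_mkwrite,
	};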

Signed-off-by: David Howells <dhowe...@redhat.com>
cc: Jeff Layton <jlay...@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsde...@vger.kernel.org
cc: linux...@kvack.org
---
 fs/netfs/buffered_write.c | 59 +++++++++++++++++++++++++++++++++++++++
 include/linux/netfs.h     |  4 +++
 2 files changed, 63 insertions(+)

diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 60e7da53cbd2..3c1f26f32351 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -413,3 +413,62 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        return ret;
 }
 EXPORT_SYMBOL(netfs_file_write_iter);
+
+/*
+ * Notification that a previously read-only page is about to become writable.
+ * Note that the caller indicates a single page of a multipage folio.
+ */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
+{
+       struct folio *folio = page_folio(vmf->page);
+       struct file *file = vmf->vma->vm_file;
+       struct inode *inode = file_inode(file);
+       vm_fault_t ret = VM_FAULT_RETRY;
+       int err;
+
+       _enter("%lx", folio->index);
+
+       sb_start_pagefault(inode->i_sb);
+
+       if (folio_wait_writeback_killable(folio))
+               goto out;
+
+       if (folio_lock_killable(folio) < 0)
+               goto out;
+
+       /* Can we see a streaming write here? */
+       if (WARN_ON(!folio_test_uptodate(folio))) {
+               ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
+               goto out;
+       }
+
+       if (netfs_folio_group(folio) != netfs_group) {
+               folio_unlock(folio);
+       err = filemap_fdatawait_range(inode->i_mapping,
+                                     folio_pos(folio),
+                                     folio_pos(folio) + folio_size(folio) - 1);
+               switch (err) {
+               case 0:
+                       ret = VM_FAULT_RETRY;
+                       goto out;
+               case -ENOMEM:
+                       ret = VM_FAULT_OOM;
+                       goto out;
+               default:
+                       ret = VM_FAULT_SIGBUS;
+                       goto out;
+               }
+       }
+
+       if (folio_test_dirty(folio))
+               trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
+       else
+               trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
+       netfs_set_group(folio, netfs_group);
+       file_update_time(file);
+       ret = VM_FAULT_LOCKED;
+out:
+       sb_end_pagefault(inode->i_sb);
+       return ret;
+}
+EXPORT_SYMBOL(netfs_page_mkwrite);
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index d1dc7ba62f17..e2a5a441b7fc 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -403,6 +403,10 @@ int netfs_write_begin(struct netfs_inode *, struct file *,
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
 bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 
+/* VMA operations API. */
+vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
+
+/* (Sub)request management API. */
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
                          enum netfs_sreq_ref_trace what);