[patch 9/10] mm: generic_file_buffered_write iovec cleanup

2007-01-12 Thread Nick Piggin
Hide some of the open-coded nr_segs tests inside the iovec helpers. This is
all to simplify generic_file_buffered_write, which gets more complex in the
next patch.

Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>
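
What this buys the caller: generic_file_buffered_write currently carries an
open-coded test of roughly this shape (cur_iov, iov_base and buf here are just
illustrative shorthand for the write loop's bookkeeping, not an exact quote of
mm/filemap.c):

	if (likely(nr_segs == 1))
		copied = filemap_copy_from_user(page, offset,
						buf, bytes);
	else
		copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);

With the helpers taking the iovec and nr_segs themselves, that collapses to a
single call:

	copied = filemap_copy_from_user(page, offset,
					cur_iov, nr_segs, iov_base, bytes);

and the nr_segs == 1 fast path lives inside the helper.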

Index: linux-2.6/mm/filemap.h
===================================================================
--- linux-2.6.orig/mm/filemap.h
+++ linux-2.6/mm/filemap.h
@@ -22,82 +22,82 @@ __filemap_copy_from_user_iovec_inatomic(
 
 /*
  * Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied.  If a fault is encountered then clear the page
- * out to (offset+bytes) and return the number of bytes which were copied.
- *
- * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
- * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
- * and if the following non-atomic copy succeeds, then there is a small window
- * where the target page contains neither the data before the write, nor the
- * data after the write (it contains zero).  A read at this time will see
- * data that is inconsistent with any ordering of the read and the write.
- * (This has been detected in practice).
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
  */
 static inline size_t
-filemap_copy_from_user(struct page *page, unsigned long offset,
-   const char __user *buf, unsigned bytes)
+filemap_copy_from_user_atomic(struct page *page, unsigned long offset,
+   const struct iovec *iov, unsigned long nr_segs,
+   size_t base, size_t bytes)
 {
char *kaddr;
-   int left;
+   size_t copied;
 
kaddr = kmap_atomic(page, KM_USER0);
-   left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
+   if (likely(nr_segs == 1)) {
+   int left;
+   char __user *buf = iov->iov_base + base;
+   left = __copy_from_user_inatomic_nocache(kaddr + offset,
+   buf, bytes);
+   copied = bytes - left;
+   } else {
+   copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+   iov, base, bytes);
+   }
kunmap_atomic(kaddr, KM_USER0);
 
-   if (left != 0) {
-   /* Do it the slow way */
-   kaddr = kmap(page);
-   left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
-   kunmap(page);
-   }
-   return bytes - left;
+   return copied;
 }
 
 /*
- * This has the same sideeffects and return value as filemap_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
- * single-segment behaviour.
+ * This has the same side effects and return value as
+ * filemap_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
  */
 static inline size_t
-filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
-   const struct iovec *iov, size_t base, size_t bytes)
+filemap_copy_from_user(struct page *page, unsigned long offset,
+   const struct iovec *iov, unsigned long nr_segs,
+size_t base, size_t bytes)
 {
char *kaddr;
size_t copied;
 
-   kaddr = kmap_atomic(page, KM_USER0);
-   copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-base, bytes);
-   kunmap_atomic(kaddr, KM_USER0);
-   if (copied != bytes) {
-   kaddr = kmap(page);
-   copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-base, bytes);
-   if (bytes - copied)
-   memset(kaddr + offset + copied, 0, bytes - copied);
-   kunmap(page);
+   kaddr = kmap(page);
+   if (likely(nr_segs == 1)) {
+   int left;
+   char __user *buf = iov->iov_base + base;
+   left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+   copied = bytes - left;
+   } else {
+   copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+   iov, base, bytes);
}
+   kunmap(page);
return copied;
 }
 
 static inline void
-filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
+filemap_set_next_iovec(const struct iovec **iovp, unsigned long nr_segs,
+size_t *basep, size_t bytes)
 {
-   const struct iovec *iov = *iovp;
-   size_t base = *basep;
-
-   while (bytes) {
-   int copy = min(bytes, iov->iov_len - base);
-
-   
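
The tail of this hunk is where filemap_set_next_iovec advances (*iovp, *basep)
past the bytes just copied, stepping into the next segment when the current one
is exhausted; with the nr_segs argument it can skip that walk entirely for the
common single-segment case.  A minimal stand-alone sketch of the multi-segment
advance, using the struct iovec from <sys/uio.h> (set_next_iovec and the buffer
names are made up for this illustration, it is not the kernel code):

	#include <stdio.h>
	#include <sys/uio.h>

	/*
	 * Advance (*iovp, *basep) past "bytes" bytes of an iovec array:
	 * *iovp ends up at the segment holding the next byte to copy,
	 * *basep at the offset within that segment.
	 */
	static void set_next_iovec(const struct iovec **iovp, size_t *basep,
				   size_t bytes)
	{
		const struct iovec *iov = *iovp;
		size_t base = *basep;

		while (bytes) {
			size_t copy = iov->iov_len - base;

			if (copy > bytes)
				copy = bytes;
			bytes -= copy;
			base += copy;
			if (base == iov->iov_len) {
				iov++;
				base = 0;
			}
		}
		*iovp = iov;
		*basep = base;
	}

	int main(void)
	{
		char a[4], b[6];
		struct iovec vec[2] = {
			{ .iov_base = a, .iov_len = sizeof(a) },
			{ .iov_base = b, .iov_len = sizeof(b) },
		};
		const struct iovec *cur = vec;
		size_t base = 0;

		/* consume 6 bytes: all of a (4), then 2 bytes of b */
		set_next_iovec(&cur, &base, 6);
		printf("segment %td, offset %zu\n", cur - vec, base);
		return 0;
	}

This prints "segment 1, offset 2".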
