On Thu, Jan 26, 2017 at 02:57:54PM +0300, Kirill A. Shutemov wrote:
> Do not assume length of bio segment is never larger than PAGE_SIZE.
> With huge pages it's HPAGE_PMD_SIZE (2M on x86-64).

I don't think we even need hugepages for BRD to be buggy.  I think there are
already places which allocate compound pages (not in highmem, obviously ...)
and put them in biovecs.  So this is pure and simple a bugfix.

That said, I find the current code in brd a bit inelegant, and I don't
think this patch helps... indeed, I think it's buggy:

> @@ -202,12 +202,15 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
>       size_t copy;
>  
>       copy = min_t(size_t, n, PAGE_SIZE - offset);
> +     n -= copy;
>       if (!brd_insert_page(brd, sector))
>               return -ENOSPC;
> -     if (copy < n) {
> +     while (n) {
>               sector += copy >> SECTOR_SHIFT;
>               if (!brd_insert_page(brd, sector))
>                       return -ENOSPC;
> +             copy = min_t(size_t, n, PAGE_SIZE);
> +             n -= copy;
>       }

We're decrementing 'n' to 0, then testing it, so we never fill in the
last page ... right?

Anyway, here's my effort.  Untested.

diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 3adc32a3153b..0802a6abcd81 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -202,12 +202,14 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
        size_t copy;
 
        copy = min_t(size_t, n, PAGE_SIZE - offset);
-       if (!brd_insert_page(brd, sector))
-               return -ENOSPC;
-       if (copy < n) {
-               sector += copy >> SECTOR_SHIFT;
+       for (;;) {
                if (!brd_insert_page(brd, sector))
                        return -ENOSPC;
+               n -= copy;
+               if (!n)
+                       break;
+               sector += copy >> SECTOR_SHIFT;
+               copy = min_t(size_t, n, PAGE_SIZE);
        }
        return 0;
 }
@@ -239,26 +241,23 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
        struct page *page;
        void *dst;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
-       size_t copy;
+       size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
 
-       copy = min_t(size_t, n, PAGE_SIZE - offset);
-       page = brd_lookup_page(brd, sector);
-       BUG_ON(!page);
-
-       dst = kmap_atomic(page);
-       memcpy(dst + offset, src, copy);
-       kunmap_atomic(dst);
-
-       if (copy < n) {
-               src += copy;
-               sector += copy >> SECTOR_SHIFT;
-               copy = n - copy;
+       for (;;) {
                page = brd_lookup_page(brd, sector);
                BUG_ON(!page);
 
                dst = kmap_atomic(page);
-               memcpy(dst, src, copy);
+               memcpy(dst + offset, src, copy);
                kunmap_atomic(dst);
+
+               n -= copy;
+               if (!n)
+                       break;
+               src += copy;
+               sector += copy >> SECTOR_SHIFT;
+               offset = 0;
+               copy = min_t(size_t, n, PAGE_SIZE);
        }
 }
 
@@ -271,28 +270,24 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
        struct page *page;
        void *src;
        unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
-       size_t copy;
+       size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
 
-       copy = min_t(size_t, n, PAGE_SIZE - offset);
-       page = brd_lookup_page(brd, sector);
-       if (page) {
-               src = kmap_atomic(page);
-               memcpy(dst, src + offset, copy);
-               kunmap_atomic(src);
-       } else
-               memset(dst, 0, copy);
-
-       if (copy < n) {
-               dst += copy;
-               sector += copy >> SECTOR_SHIFT;
-               copy = n - copy;
+       for (;;) {
                page = brd_lookup_page(brd, sector);
                if (page) {
                        src = kmap_atomic(page);
-                       memcpy(dst, src, copy);
+                       memcpy(dst, src + offset, copy);
                        kunmap_atomic(src);
                } else
                        memset(dst, 0, copy);
+
+               n -= copy;
+               if (!n)
+                       break;
+               dst += copy;
+               sector += copy >> SECTOR_SHIFT;
+               offset = 0;
+               copy = min_t(size_t, n, PAGE_SIZE);
        }
 }
 

Reply via email to