Re: [Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-07-28 Thread Chris Wilson
Quoting Tvrtko Ursulin (2017-07-27 10:05:02)
> From: Tvrtko Ursulin 
> 
> Since the scatterlist length field is an unsigned int, make
> sure that sg_alloc_table_from_pages does not overflow it while
> coalescing pages to a single entry.
> 
> v2: Drop reference to future use. Use UINT_MAX.
> v3: max_segment must be page aligned.
> v4: Do not rely on compiler to optimise out the rounddown.
> (Joonas Lahtinen)
> v5: Simplified loops and use post-increments rather than
> pre-increments. Use PAGE_MASK and fix comment typo.
> (Andy Shevchenko)
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Masahiro Yamada 
> Cc: linux-ker...@vger.kernel.org
> Reviewed-by: Chris Wilson  (v2)
> Cc: Joonas Lahtinen 
> Cc: Andy Shevchenko 
> ---
>  include/linux/scatterlist.h |  6 ++
>  lib/scatterlist.c   | 31 ---
>  2 files changed, 26 insertions(+), 11 deletions(-)
> 
> diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
> index 205aefb4ed93..6dd2ddbc6230 100644
> --- a/include/linux/scatterlist.h
> +++ b/include/linux/scatterlist.h
> @@ -21,6 +21,12 @@ struct scatterlist {
>  };
>  
>  /*
> + * Since the above length field is an unsigned int, below we define the maximum
> + * length in bytes that can be stored in one scatterlist entry.
> + */
> +#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
> +
> +/*
>   * These macros should be used after a dma_map_sg call has been done
>   * to get bus addresses of each of the SG entries and their lengths.
>   * You should only work with the number of sg entries dma_map_sg
> diff --git a/lib/scatterlist.c b/lib/scatterlist.c
> index dee0c5004e2f..7b2e74da2c44 100644
> --- a/lib/scatterlist.c
> +++ b/lib/scatterlist.c
> @@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
> unsigned int offset, unsigned long size,
> gfp_t gfp_mask)
>  {
> -   unsigned int chunks;
> -   unsigned int i;
> -   unsigned int cur_page;
> +   const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
> +   unsigned int chunks, cur_page, seg_len, i;
> int ret;
> struct scatterlist *s;
>  
> /* compute number of contiguous chunks */
> chunks = 1;
> -   for (i = 1; i < n_pages; ++i)
> -   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
> -   ++chunks;
> +   seg_len = 0;
> +   for (i = 1; i < n_pages; i++) {
> +   seg_len += PAGE_SIZE;
> +   if (seg_len >= max_segment ||
> +   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
> +   chunks++;
> +   seg_len = 0;
> +   }
> +   }

Ok. Took a moment to realise that it works correctly for a chunk ending
on the last page.

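(For anyone else pausing at the same spot, here is a throwaway userspace
sketch of the counting logic; the pfn values, page size and segment limit
are made up purely for illustration:)

#include <stdio.h>

/*
 * Each boundary found *before* page i opens a new chunk by bumping
 * 'chunks', so the chunk that runs to the final page is covered by the
 * initial value of 1 and needs no special handling after the loop.
 */
int main(void)
{
        const unsigned long page_size = 4096;             /* illustrative */
        const unsigned long max_segment = 2 * page_size;  /* tiny on purpose */
        const unsigned long pfns[] = { 10, 11, 12, 20, 30, 31 };
        const unsigned int n_pages = sizeof(pfns) / sizeof(pfns[0]);
        unsigned int chunks = 1, i;
        unsigned long seg_len = 0;

        for (i = 1; i < n_pages; i++) {
                seg_len += page_size;
                if (seg_len >= max_segment || pfns[i] != pfns[i - 1] + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }

        /* {10,11} {12} {20} {30,31} -> prints "chunks = 4" */
        printf("chunks = %u\n", chunks);
        return 0;
}
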
> ret = sg_alloc_table(sgt, chunks, gfp_mask);
> if (unlikely(ret))
> @@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
> /* merging chunks and putting them into the scatterlist */
> cur_page = 0;
> for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
> -   unsigned long chunk_size;
> -   unsigned int j;
> +   unsigned int j, chunk_size;
>  
> /* look for the end of the current chunk */
> -   for (j = cur_page + 1; j < n_pages; ++j)
> -   if (page_to_pfn(pages[j]) !=
> +   seg_len = 0;
> +   for (j = cur_page + 1; j < n_pages; j++) {
> +   seg_len += PAGE_SIZE;
> +   if (seg_len >= max_segment ||
> +   page_to_pfn(pages[j]) !=
> page_to_pfn(pages[j - 1]) + 1)
> break;
> +   }

Ok.

>  
> chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
> -   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
> +   sg_set_page(s, pages[cur_page],
> +   min_t(unsigned long, size, chunk_size), offset);
> size -= chunk_size;
> offset = 0;
> cur_page = j;

Reviewed-by: Chris Wilson 
-Chris


[Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-07-27 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown.
(Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than
pre-increments. Use PAGE_MASK and fix comment typo.
(Andy Shevchenko)

Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
Cc: Joonas Lahtinen 
Cc: Andy Shevchenko 
---
 include/linux/scatterlist.h |  6 ++
 lib/scatterlist.c   | 31 ---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 205aefb4ed93..6dd2ddbc6230 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
 };
 
 /*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries dma_map_sg
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index dee0c5004e2f..7b2e74da2c44 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
-   unsigned int i;
-   unsigned int cur_page;
+   const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+   unsigned int chunks, cur_page, seg_len, i;
int ret;
struct scatterlist *s;
 
/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-   ++chunks;
+   seg_len = 0;
+   for (i = 1; i < n_pages; i++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+   chunks++;
+   seg_len = 0;
+   }
+   }
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
-   unsigned int j;
+   unsigned int j, chunk_size;
 
/* look for the end of the current chunk */
-   for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   seg_len = 0;
+   for (j = cur_page + 1; j < n_pages; j++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   }
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
-- 
2.9.4



[Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-05-04 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown.
(Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than
pre-increments. Use PAGE_MASK and fix comment typo.
(Andy Shevchenko)

Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
Cc: Joonas Lahtinen 
Cc: Andy Shevchenko 
---
 include/linux/scatterlist.h |  6 ++
 lib/scatterlist.c   | 31 ---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c981bee1a3ae..4768eeeb7054 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
 };
 
 /*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries dma_map_sg
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 11f172c383cb..ca4ccd8c80b9 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
-   unsigned int i;
-   unsigned int cur_page;
+   const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+   unsigned int chunks, cur_page, seg_len, i;
int ret;
struct scatterlist *s;
 
/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-   ++chunks;
+   seg_len = 0;
+   for (i = 1; i < n_pages; i++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+   chunks++;
+   seg_len = 0;
+   }
+   }
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
-   unsigned int j;
+   unsigned int j, chunk_size;
 
/* look for the end of the current chunk */
-   for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   seg_len = 0;
+   for (j = cur_page + 1; j < n_pages; j++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   }
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
-- 
2.9.3



Re: [Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-03-07 Thread Tvrtko Ursulin


Hi David,

Chris noticed your "scatterlist: don't overflow length field" patch and 
pinged me, so I am copying you on another thread which tries to solve 
the same problem.


My latest series is here: 
https://patchwork.freedesktop.org/series/18062/, but it has been going 
on since November last year.


I like your BUILD_BUG_ON safety, but otherwise our patches are pretty 
similar.


The i915 driver also benefits from the ability to create large sg chunks, 
which saves us a few megabytes of RAM at runtime, but we do have to 
degrade to smaller chunks when running under a hypervisor. For that we 
use the swiotlb_max_segment API Konrad recently added for this purpose.

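Roughly, the segment size selection ends up looking something like this 
(sketch only: the helper name is invented for illustration, and 
swiotlb_max_segment() returning 0 when SWIOTLB is not active is an 
assumption here rather than a quote from the i915 code):

#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

/*
 * Illustrative helper: use the bounce-buffer-safe segment size if SWIOTLB
 * is in use, otherwise the largest value the scatterlist length field can
 * hold.
 */
static unsigned int example_sg_segment_size(void)
{
        unsigned int size = swiotlb_max_segment();

        if (!size)
                size = SCATTERLIST_MAX_SEGMENT;

        return size;
}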

So, in addition to fixing the overflow, what I did was export a new 
flavour of sg_alloc_table_from_pages which lets the caller control the 
maximum chunk size.

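A rough sketch of that extra entry point (the exact name, parameter order 
and wrapper shape below are illustrative assumptions, not quoted from the 
series):

#include <linux/scatterlist.h>

/*
 * Sketch: same as sg_alloc_table_from_pages(), but with the maximum
 * coalesced segment size (page aligned) under caller control.
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
                                unsigned long size, unsigned int max_segment,
                                gfp_t gfp_mask);

/* The existing helper then simply passes the largest legal segment: */
static inline int example_sg_alloc_table_from_pages(struct sg_table *sgt,
                                                    struct page **pages,
                                                    unsigned int n_pages,
                                                    unsigned int offset,
                                                    unsigned long size,
                                                    gfp_t gfp_mask)
{
        return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
                                           SCATTERLIST_MAX_SEGMENT, gfp_mask);
}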

Maybe you can have a look at my series and see if it would work for you? 
I've been trying to gain some traction for it for some months now.


Regards,

Tvrtko

On 07/03/2017 08:58, Tvrtko Ursulin wrote:


Hi,

On 16/01/2017 14:12, Tvrtko Ursulin wrote:

From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown.
(Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than
pre-increments. Use PAGE_MASK and fix comment typo.
(Andy Shevchenko)
Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
Cc: Joonas Lahtinen 
Cc: Andy Shevchenko 


Anyone in the mood for reviewing from here to the end of the series?

Regards,

Tvrtko


---
 include/linux/scatterlist.h |  6 ++
 lib/scatterlist.c   | 31 ---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c981bee1a3ae..4768eeeb7054 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
 };

 /*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries dma_map_sg
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e05e7fc98892..65f375645df5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
                               unsigned int offset, unsigned long size,
                               gfp_t gfp_mask)
 {
-       unsigned int chunks;
-       unsigned int i;
-       unsigned int cur_page;
+       const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+       unsigned int chunks, cur_page, seg_len, i;
        int ret;
        struct scatterlist *s;

        /* compute number of contiguous chunks */
        chunks = 1;
-       for (i = 1; i < n_pages; ++i)
-               if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-                       ++chunks;
+       seg_len = 0;
+       for (i = 1; i < n_pages; i++) {
+               seg_len += PAGE_SIZE;
+               if (seg_len >= max_segment ||
+                   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+                       chunks++;
+                       seg_len = 0;
+               }
+       }

        ret = sg_alloc_table(sgt, chunks, gfp_mask);
        if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-               unsigned long chunk_size;
-               unsigned int j;
+               unsigned int j, chunk_size;

                /* look for the end of the current chunk */
-               for (j = cur_page + 1; j < n_pages; ++j)
-                       if (page_to_pfn(pages[j]) !=
+               seg_len = 0;
+               for (j = cur_page + 1; j < n_pages; j++) {
+                       seg_len += PAGE_SIZE;
+                       if (seg_len >= max_segment ||
+                           page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
+               }

                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-               sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+               sg_set_page(s, pages[cur_page],
+                           min_t(unsigned long, size, chunk_size), offset);
                size -= chunk_size;
                offset = 0;
                cur_page = j;



Re: [Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-03-07 Thread Tvrtko Ursulin


Hi,

On 16/01/2017 14:12, Tvrtko Ursulin wrote:

From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown.
(Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than
pre-increments. Use PAGE_MASK and fix comment typo.
(Andy Shevchenko)
Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
Cc: Joonas Lahtinen 
Cc: Andy Shevchenko 


Anyone in the mood for reviewing from here to the end of the series?

Regards,

Tvrtko


---
 include/linux/scatterlist.h |  6 ++
 lib/scatterlist.c   | 31 ---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c981bee1a3ae..4768eeeb7054 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
 };

 /*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries dma_map_sg
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e05e7fc98892..65f375645df5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
-   unsigned int i;
-   unsigned int cur_page;
+   const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+   unsigned int chunks, cur_page, seg_len, i;
int ret;
struct scatterlist *s;

/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-   ++chunks;
+   seg_len = 0;
+   for (i = 1; i < n_pages; i++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+   chunks++;
+   seg_len = 0;
+   }
+   }

ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
-   unsigned int j;
+   unsigned int j, chunk_size;

/* look for the end of the current chunk */
-   for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   seg_len = 0;
+   for (j = cur_page + 1; j < n_pages; j++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   }

chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;




[Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-01-16 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.
v4: Do not rely on compiler to optimise out the rounddown.
(Joonas Lahtinen)
v5: Simplified loops and use post-increments rather than
pre-increments. Use PAGE_MASK and fix comment typo.
(Andy Shevchenko)

Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
Cc: Joonas Lahtinen 
Cc: Andy Shevchenko 
---
 include/linux/scatterlist.h |  6 ++
 lib/scatterlist.c   | 31 ---
 2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c981bee1a3ae..4768eeeb7054 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -21,6 +21,12 @@ struct scatterlist {
 };
 
 /*
+ * Since the above length field is an unsigned int, below we define the maximum
+ * length in bytes that can be stored in one scatterlist entry.
+ */
+#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
+
+/*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
  * You should only work with the number of sg entries dma_map_sg
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e05e7fc98892..65f375645df5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,17 +394,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
-   unsigned int i;
-   unsigned int cur_page;
+   const unsigned int max_segment = SCATTERLIST_MAX_SEGMENT;
+   unsigned int chunks, cur_page, seg_len, i;
int ret;
struct scatterlist *s;
 
/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
-   ++chunks;
+   seg_len = 0;
+   for (i = 1; i < n_pages; i++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+   chunks++;
+   seg_len = 0;
+   }
+   }
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +418,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
-   unsigned int j;
+   unsigned int j, chunk_size;
 
/* look for the end of the current chunk */
-   for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   seg_len = 0;
+   for (j = cur_page + 1; j < n_pages; j++) {
+   seg_len += PAGE_SIZE;
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   }
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
-- 
2.7.4



[Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2017-01-11 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.
v3: max_segment must be page aligned.

Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
Reviewed-by: Chris Wilson  (v2)
---
 lib/scatterlist.c | 25 +++--
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e05e7fc98892..4fc54801cd29 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,7 +394,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
+   const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+   unsigned int seg_len, chunks;
unsigned int i;
unsigned int cur_page;
int ret;
@@ -402,9 +403,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 
/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+   seg_len = PAGE_SIZE;
+   for (i = 1; i < n_pages; ++i) {
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
++chunks;
+   seg_len = PAGE_SIZE;
+   } else {
+   seg_len += PAGE_SIZE;
+   }
+   }
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +421,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
+   unsigned int chunk_size;
unsigned int j;
 
/* look for the end of the current chunk */
+   seg_len = PAGE_SIZE;
for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   else
+   seg_len += PAGE_SIZE;
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
-- 
2.7.4



Re: [Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2016-11-11 Thread Chris Wilson
On Fri, Nov 11, 2016 at 08:50:18AM +, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin 
> 
> Since the scatterlist length field is an unsigned int, make
> sure that sg_alloc_table_from_pages does not overflow it while
> coalescing pages to a single entry.
> 
> v2: Drop reference to future use. Use UINT_MAX.
> 
> Signed-off-by: Tvrtko Ursulin 
> Cc: Masahiro Yamada 
> Cc: linux-ker...@vger.kernel.org
> ---
>  lib/scatterlist.c | 25 +++--
>  1 file changed, 19 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/scatterlist.c b/lib/scatterlist.c
> index e05e7fc98892..de15f369b317 100644
> --- a/lib/scatterlist.c
> +++ b/lib/scatterlist.c
> @@ -394,7 +394,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
>   unsigned int offset, unsigned long size,
>   gfp_t gfp_mask)
>  {
> - unsigned int chunks;
> + const unsigned int max_segment = UINT_MAX;
> + unsigned int seg_len, chunks;
>   unsigned int i;
>   unsigned int cur_page;
>   int ret;
> @@ -402,9 +403,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
>  
>   /* compute number of contiguous chunks */
>   chunks = 1;
> - for (i = 1; i < n_pages; ++i)
> - if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
> + seg_len = PAGE_SIZE;
> + for (i = 1; i < n_pages; ++i) {
> + if (seg_len >= max_segment ||
> + page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
>   ++chunks;
> + seg_len = PAGE_SIZE;
> + } else {
> + seg_len += PAGE_SIZE;
> + }
> + }
>  
>   ret = sg_alloc_table(sgt, chunks, gfp_mask);
>   if (unlikely(ret))
> @@ -413,17 +421,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
>   /* merging chunks and putting them into the scatterlist */
>   cur_page = 0;
>   for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
> - unsigned long chunk_size;
> + unsigned int chunk_size;
>   unsigned int j;
>  
>   /* look for the end of the current chunk */
> + seg_len = PAGE_SIZE;
>   for (j = cur_page + 1; j < n_pages; ++j)
> - if (page_to_pfn(pages[j]) !=
> + if (seg_len >= max_segment ||
> + page_to_pfn(pages[j]) !=
>   page_to_pfn(pages[j - 1]) + 1)
>   break;
> + else
> + seg_len += PAGE_SIZE;
>  
>   chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;

chunk_size = seg_len - offset;
?

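(Standalone illustration of why that substitution is safe -- the pfns and
page size below are made up, and the max_segment check is dropped since it
does not change the relationship: with seg_len seeded to PAGE_SIZE and
bumped once per extra page kept, seg_len at the break always equals
(j - cur_page) << PAGE_SHIFT.)

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

int main(void)
{
        const unsigned long pfns[] = { 100, 101, 102, 200, 201 };
        const unsigned int n_pages = sizeof(pfns) / sizeof(pfns[0]);
        unsigned int cur_page = 0;

        while (cur_page < n_pages) {
                unsigned long seg_len = PAGE_SIZE;
                unsigned int j;

                /* mirror of the chunk-end scan, minus the max_segment test */
                for (j = cur_page + 1; j < n_pages; j++) {
                        if (pfns[j] != pfns[j - 1] + 1)
                                break;
                        seg_len += PAGE_SIZE;
                }

                assert(seg_len == (unsigned long)(j - cur_page) << PAGE_SHIFT);
                printf("chunk of %u page(s)\n", j - cur_page);
                cur_page = j;
        }
        return 0;
}
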
> - sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
> + sg_set_page(s, pages[cur_page],
> + min_t(unsigned long, size, chunk_size), offset);
>   size -= chunk_size;
>   offset = 0;
>   cur_page = j;

Could BUG_ON(size); afterwards and BUG_ON(!size) inside? Although that
requires the caller didn't make a mistake in n_pages vs offset+size.

Minor comments,
Reviewed-by: Chris Wilson 
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre


[Intel-gfx] [PATCH 2/4] lib/scatterlist: Avoid potential scatterlist entry overflow

2016-11-11 Thread Tvrtko Ursulin
From: Tvrtko Ursulin 

Since the scatterlist length field is an unsigned int, make
sure that sg_alloc_table_from_pages does not overflow it while
coalescing pages to a single entry.

v2: Drop reference to future use. Use UINT_MAX.

Signed-off-by: Tvrtko Ursulin 
Cc: Masahiro Yamada 
Cc: linux-ker...@vger.kernel.org
---
 lib/scatterlist.c | 25 +++--
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index e05e7fc98892..de15f369b317 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -394,7 +394,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
unsigned int offset, unsigned long size,
gfp_t gfp_mask)
 {
-   unsigned int chunks;
+   const unsigned int max_segment = UINT_MAX;
+   unsigned int seg_len, chunks;
unsigned int i;
unsigned int cur_page;
int ret;
@@ -402,9 +403,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 
/* compute number of contiguous chunks */
chunks = 1;
-   for (i = 1; i < n_pages; ++i)
-   if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+   seg_len = PAGE_SIZE;
+   for (i = 1; i < n_pages; ++i) {
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
++chunks;
+   seg_len = PAGE_SIZE;
+   } else {
+   seg_len += PAGE_SIZE;
+   }
+   }
 
ret = sg_alloc_table(sgt, chunks, gfp_mask);
if (unlikely(ret))
@@ -413,17 +421,22 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
/* merging chunks and putting them into the scatterlist */
cur_page = 0;
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
-   unsigned long chunk_size;
+   unsigned int chunk_size;
unsigned int j;
 
/* look for the end of the current chunk */
+   seg_len = PAGE_SIZE;
for (j = cur_page + 1; j < n_pages; ++j)
-   if (page_to_pfn(pages[j]) !=
+   if (seg_len >= max_segment ||
+   page_to_pfn(pages[j]) !=
page_to_pfn(pages[j - 1]) + 1)
break;
+   else
+   seg_len += PAGE_SIZE;
 
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
-   sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+   sg_set_page(s, pages[cur_page],
+   min_t(unsigned long, size, chunk_size), offset);
size -= chunk_size;
offset = 0;
cur_page = j;
-- 
2.7.4
