[PATCH] include/linux/gfp.h: fix typo

2018-12-27 Thread Kyle Spiers
Fix misspelled "satisfied"

Signed-off-by: Kyle Spiers 
---
 include/linux/gfp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0705164f928c..5f5e25fd6149 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -81,7 +81,7 @@ struct vm_area_struct;
  *
  * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
  *
- * %__GFP_THISNODE forces the allocation to be satisified from the requested
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
  * node with no fallbacks or placement policy enforcements.
  *
  * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
-- 
2.20.1.415.g653613c723-goog




[PATCH v2] async_pq: Remove VLA usage

2018-05-05 Thread Kyle Spiers
In the quest to remove VLAs from the kernel[1], this moves the
allocation of coefs and blocks from the stack to being kmalloc()ed.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
Forgot to add slab.h
---
 crypto/async_tx/async_pq.c  | 18 ++
 crypto/async_tx/raid6test.c |  9 -
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..af1912313a23 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -194,9 +194,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
(src_cnt <= dma_maxpq(device, 0) ||
 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
-   struct dma_async_tx_descriptor *tx;
+   struct dma_async_tx_descriptor *tx = NULL;
enum dma_ctrl_flags dma_flags = 0;
-   unsigned char coefs[src_cnt];
+   unsigned char *coefs;
int i, j;
 
/* run the p+q asynchronously */
@@ -207,6 +207,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
 * sources and update the coefficients accordingly
 */
unmap->len = len;
+   coefs = kmalloc_array(src_cnt, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
@@ -240,7 +243,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
}
 
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, 
submit);
+out:
dmaengine_unmap_put(unmap);
+   kfree(coefs);
return tx;
}
 
@@ -298,8 +303,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 {
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
-   struct dma_async_tx_descriptor *tx;
-   unsigned char coefs[disks-2];
+   struct dma_async_tx_descriptor *tx = NULL;
+   unsigned char *coefs = NULL;
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
 
@@ -318,6 +323,9 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 __func__, disks, len);
 
unmap->len = len;
+   coefs = kmalloc_array(disks - 2, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
@@ -423,6 +431,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
async_tx_sync_epilog(submit);
tx = NULL;
}
+out:
+   kfree(coefs);
dmaengine_unmap_put(unmap);
 
return tx;
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..4237a5ae8f42 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include <linux/slab.h>
 
 #undef pr
 #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
@@ -81,11 +82,16 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
init_async_submit(, 0, NULL, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
} else {
-   struct page *blocks[disks];
+   struct page **blocks;
struct page *dest;
int count = 0;
int i;
 
+   blocks = kmalloc_array(disks, sizeof(*blocks),
+   GFP_KERNEL);
+   if (!blocks)
+   return;
+
/* data+Q failure.  Reconstruct data from P,
 * then rebuild syndrome
 */
@@ -101,6 +107,7 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
 
init_async_submit(, 0, tx, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
+   kfree(blocks);
}
} else {
if (failb == disks-2) {
-- 
2.17.0.441.gb46fe60e1d-goog



[PATCH v2] async_pq: Remove VLA usage

2018-05-05 Thread Kyle Spiers
In the quest to remove VLAs from the kernel[1], this moves the
allocation of coefs and blocks from the stack to being kmalloc()ed.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
Forgot to add slab.h
---
 crypto/async_tx/async_pq.c  | 18 ++
 crypto/async_tx/raid6test.c |  9 -
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..af1912313a23 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -194,9 +194,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
(src_cnt <= dma_maxpq(device, 0) ||
 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
-   struct dma_async_tx_descriptor *tx;
+   struct dma_async_tx_descriptor *tx = NULL;
enum dma_ctrl_flags dma_flags = 0;
-   unsigned char coefs[src_cnt];
+   unsigned char *coefs;
int i, j;
 
/* run the p+q asynchronously */
@@ -207,6 +207,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
 * sources and update the coefficients accordingly
 */
unmap->len = len;
+   coefs = kmalloc_array(src_cnt, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
@@ -240,7 +243,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
}
 
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, 
submit);
+out:
dmaengine_unmap_put(unmap);
+   kfree(coefs);
return tx;
}
 
@@ -298,8 +303,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 {
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
-   struct dma_async_tx_descriptor *tx;
-   unsigned char coefs[disks-2];
+   struct dma_async_tx_descriptor *tx = NULL;
+   unsigned char *coefs = NULL;
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
 
@@ -318,6 +323,9 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 __func__, disks, len);
 
unmap->len = len;
+   coefs = kmalloc_array(disks - 2, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
@@ -423,6 +431,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
async_tx_sync_epilog(submit);
tx = NULL;
}
+out:
+   kfree(coefs);
dmaengine_unmap_put(unmap);
 
return tx;
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..4237a5ae8f42 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -24,6 +24,7 @@
 #include 
 #include 
 #include 
+#include <linux/slab.h>
 
 #undef pr
 #define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)
@@ -81,11 +82,16 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
init_async_submit(, 0, NULL, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
} else {
-   struct page *blocks[disks];
+   struct page **blocks;
struct page *dest;
int count = 0;
int i;
 
+   blocks = kmalloc_array(disks, sizeof(*blocks),
+   GFP_KERNEL);
+   if (!blocks)
+   return;
+
/* data+Q failure.  Reconstruct data from P,
 * then rebuild syndrome
 */
@@ -101,6 +107,7 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
 
init_async_submit(, 0, tx, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
+   kfree(blocks);
}
} else {
if (failb == disks-2) {
-- 
2.17.0.441.gb46fe60e1d-goog



[PATCH] async_pq: Remove VLA usage

2018-05-03 Thread Kyle Spiers
In the quest to remove VLAs from the kernel[1], this moves the
allocation of coefs and blocks from the stack to being kmalloc()ed.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
 crypto/async_tx/async_pq.c  | 18 ++
 crypto/async_tx/raid6test.c |  8 +++-
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..af1912313a23 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -194,9 +194,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
(src_cnt <= dma_maxpq(device, 0) ||
 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
-   struct dma_async_tx_descriptor *tx;
+   struct dma_async_tx_descriptor *tx = NULL;
enum dma_ctrl_flags dma_flags = 0;
-   unsigned char coefs[src_cnt];
+   unsigned char *coefs;
int i, j;
 
/* run the p+q asynchronously */
@@ -207,6 +207,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
 * sources and update the coefficients accordingly
 */
unmap->len = len;
+   coefs = kmalloc_array(src_cnt, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
@@ -240,7 +243,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
}
 
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, 
submit);
+out:
dmaengine_unmap_put(unmap);
+   kfree(coefs);
return tx;
}
 
@@ -298,8 +303,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 {
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
-   struct dma_async_tx_descriptor *tx;
-   unsigned char coefs[disks-2];
+   struct dma_async_tx_descriptor *tx = NULL;
+   unsigned char *coefs = NULL;
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
 
@@ -318,6 +323,9 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 __func__, disks, len);
 
unmap->len = len;
+   coefs = kmalloc_array(disks - 2, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
@@ -423,6 +431,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
async_tx_sync_epilog(submit);
tx = NULL;
}
+out:
+   kfree(coefs);
dmaengine_unmap_put(unmap);
 
return tx;
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..ea036b531ef2 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,16 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
init_async_submit(, 0, NULL, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
} else {
-   struct page *blocks[disks];
+   struct page **blocks;
struct page *dest;
int count = 0;
int i;
 
+   blocks = kmalloc_array(disks, sizeof(*blocks),
+   GFP_KERNEL);
+   if (!blocks)
+   return;
+
/* data+Q failure.  Reconstruct data from P,
 * then rebuild syndrome
 */
@@ -101,6 +106,7 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
 
init_async_submit(, 0, tx, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
+   kfree(blocks);
}
} else {
if (failb == disks-2) {
-- 
2.17.0.441.gb46fe60e1d-goog



[PATCH] async_pq: Remove VLA usage

2018-05-03 Thread Kyle Spiers
In the quest to remove VLAs from the kernel[1], this moves the
allocation of coefs and blocks from the stack to being kmalloc()ed.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 crypto/async_tx/async_pq.c  | 18 ++
 crypto/async_tx/raid6test.c |  8 +++-
 2 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..af1912313a23 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -194,9 +194,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
(src_cnt <= dma_maxpq(device, 0) ||
 dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
-   struct dma_async_tx_descriptor *tx;
+   struct dma_async_tx_descriptor *tx = NULL;
enum dma_ctrl_flags dma_flags = 0;
-   unsigned char coefs[src_cnt];
+   unsigned char *coefs;
int i, j;
 
/* run the p+q asynchronously */
@@ -207,6 +207,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
 * sources and update the coefficients accordingly
 */
unmap->len = len;
+   coefs = kmalloc_array(src_cnt, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0, j = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
@@ -240,7 +243,9 @@ async_gen_syndrome(struct page **blocks, unsigned int 
offset, int disks,
}
 
tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, 
submit);
+out:
dmaengine_unmap_put(unmap);
+   kfree(coefs);
return tx;
}
 
@@ -298,8 +303,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 {
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
-   struct dma_async_tx_descriptor *tx;
-   unsigned char coefs[disks-2];
+   struct dma_async_tx_descriptor *tx = NULL;
+   unsigned char *coefs = NULL;
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
struct dmaengine_unmap_data *unmap = NULL;
 
@@ -318,6 +323,9 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
 __func__, disks, len);
 
unmap->len = len;
+   coefs = kmalloc_array(disks - 2, sizeof(*coefs), GFP_KERNEL);
+   if (!coefs)
+   goto out;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
unmap->addr[j] = dma_map_page(dev, blocks[i],
@@ -423,6 +431,8 @@ async_syndrome_val(struct page **blocks, unsigned int 
offset, int disks,
async_tx_sync_epilog(submit);
tx = NULL;
}
+out:
+   kfree(coefs);
dmaengine_unmap_put(unmap);
 
return tx;
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..ea036b531ef2 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,16 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
init_async_submit(, 0, NULL, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
} else {
-   struct page *blocks[disks];
+   struct page **blocks;
struct page *dest;
int count = 0;
int i;
 
+   blocks = kmalloc_array(disks, sizeof(*blocks),
+   GFP_KERNEL);
+   if (!blocks)
+   return;
+
/* data+Q failure.  Reconstruct data from P,
 * then rebuild syndrome
 */
@@ -101,6 +106,7 @@ static void raid6_dual_recov(int disks, size_t bytes, int 
faila, int failb, stru
 
init_async_submit(, 0, tx, NULL, NULL, 
addr_conv);
tx = async_gen_syndrome(ptrs, 0, disks, bytes, );
+   kfree(blocks);
}
} else {
if (failb == disks-2) {
-- 
2.17.0.441.gb46fe60e1d-goog



[PATCH v2] rave-sp: Remove VLA

2018-04-27 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this creates
constants for the checksum lengths of CCITT and 8B2C and changes
crc_calculated to be the maximum size of a checksum.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
 drivers/mfd/rave-sp.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 5c858e784a89..4ce96b7137db 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -45,7 +45,9 @@
 #define RAVE_SP_DLE0x10
 
 #define RAVE_SP_MAX_DATA_SIZE  64
-#define RAVE_SP_CHECKSUM_SIZE  2  /* Worst case scenario on RDU2 */
+#define RAVE_SP_CHECKSUM_8B2C  1
+#define RAVE_SP_CHECKSUM_CCITT 2
+#define RAVE_SP_CHECKSUM_SIZE  RAVE_SP_CHECKSUM_CCITT
 /*
  * We don't store STX, ETX and unescaped bytes, so Rx is only
  * DATA + CSUM
@@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
const size_t payload_length  = length - checksum_length;
const u8 *crc_reported   = &data[payload_length];
struct device *dev   = &sp->serdev->dev;
-   u8 crc_calculated[checksum_length];
+   u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
+
+   if (unlikely(checksum_length > sizeof(crc_calculated))) {
+   dev_warn(dev, "Checksum too long, dropping\n");
+   return;
+   }
 
print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
   16, 1, data, length, false);
-- 
2.17.0.441.gb46fe60e1d-goog



[PATCH v2] rave-sp: Remove VLA

2018-04-27 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this creates
constants for the checksum lengths of CCITT and 8B2C and changes
crc_calculated to be the maximum size of a checksum.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 drivers/mfd/rave-sp.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 5c858e784a89..4ce96b7137db 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -45,7 +45,9 @@
 #define RAVE_SP_DLE0x10
 
 #define RAVE_SP_MAX_DATA_SIZE  64
-#define RAVE_SP_CHECKSUM_SIZE  2  /* Worst case scenario on RDU2 */
+#define RAVE_SP_CHECKSUM_8B2C  1
+#define RAVE_SP_CHECKSUM_CCITT 2
+#define RAVE_SP_CHECKSUM_SIZE  RAVE_SP_CHECKSUM_CCITT
 /*
  * We don't store STX, ETX and unescaped bytes, so Rx is only
  * DATA + CSUM
@@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
const size_t payload_length  = length - checksum_length;
const u8 *crc_reported   = &data[payload_length];
struct device *dev   = &sp->serdev->dev;
-   u8 crc_calculated[checksum_length];
+   u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
+
+   if (unlikely(checksum_length > sizeof(crc_calculated))) {
+   dev_warn(dev, "Checksum too long, dropping\n");
+   return;
+   }
 
print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
   16, 1, data, length, false);
-- 
2.17.0.441.gb46fe60e1d-goog



Re: [PATCH] rave-sp: Remove VLA

2018-04-25 Thread Kyle Spiers
The error message is also wrong. Would "Checksum length too large" be fine?

On Wed, Apr 25, 2018 at 3:31 AM Lee Jones <lee.jo...@linaro.org> wrote:

> On Tue, 24 Apr 2018, Kees Cook wrote:

> > On Mon, Apr 23, 2018 at 10:43 PM, Lee Jones <lee.jo...@linaro.org>
wrote:
> > > On Mon, 23 Apr 2018, Kyle Spiers wrote:
> > >
> > >> As part of the effort to remove VLAs from the kernel[1], this creates
> > >> constants for the checksum lengths of CCITT and 8B2C and changes
> > >> crc_calculated to be the maximum size of a checksum.
> > >>
> > >> https://lkml.org/lkml/2018/3/7/621
> > >>
> > >> Signed-off-by: Kyle Spiers <ksspi...@google.com>
> > >> ---
> > >>  drivers/mfd/rave-sp.c | 11 +--
> > >>  1 file changed, 9 insertions(+), 2 deletions(-)
> > >>
> > >> diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
> > >> index 5c858e784a89..99fa482419f9 100644
> > >> --- a/drivers/mfd/rave-sp.c
> > >> +++ b/drivers/mfd/rave-sp.c
> > >> @@ -45,7 +45,9 @@
> > >>  #define RAVE_SP_DLE  0x10
> > >>
> > >>  #define RAVE_SP_MAX_DATA_SIZE64
> > >> -#define RAVE_SP_CHECKSUM_SIZE2  /* Worst case
scenario on RDU2 */
> > >> +#define RAVE_SP_CHECKSUM_8B2C1
> > >> +#define RAVE_SP_CHECKSUM_CCITT   2
> > >> +#define RAVE_SP_CHECKSUM_SIZERAVE_SP_CHECKSUM_CCITT
> > >>  /*
> > >>   * We don't store STX, ETX and unescaped bytes, so Rx is only
> > >>   * DATA + CSUM
> > >> @@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct
rave_sp *sp,
> > >>   const size_t payload_length  = length - checksum_length;
> > >>   const u8 *crc_reported   = [payload_length];
> > >>   struct device *dev   = >serdev->dev;
> > >> - u8 crc_calculated[checksum_length];
> > >> + u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
> > >> +
> > >> + if (unlikely(length > sizeof(crc_calculated))) {
> > >
> > > Forgive me if I have this wrong (it's still very early here), but this
> > > doesn't leave any room for the payload?
> > >
> > > <--   length -->
> > > <- payload length ->
> > > [CK][CK][D][A][T][A] .. [64]
> > >
> > > It is my hope that length would always be larger than the size of the
> > > checksum, or else there would never be any data?
> > >
> > > Should this not be:
> > >
> > > if (unlikely(length > RAVE_SP_MAX_DATA_SIZE))
> >
> > Oh, whoops, no, this should be:
> >
> > + if (unlikely(checksum_length > sizeof(crc_calculated))) {
> >
> > (To validate the VLA max size.)

> That doesn't match the OP's error message though:

>dev_warn(dev, "Dropping oversized frame\n");

> Which I assume is designed to complement the existing warning:

>if (unlikely(length <= checksum_length))
>dev_warn(dev, "Dropping short frame\n");

> --
> Lee Jones [李琼斯]
> Linaro Services Technical Lead
> Linaro.org │ Open source software for ARM SoCs
> Follow Linaro: Facebook | Twitter | Blog


Re: [PATCH] rave-sp: Remove VLA

2018-04-25 Thread Kyle Spiers
The error message is also wrong. Would "Checksum length too large" be fine?

On Wed, Apr 25, 2018 at 3:31 AM Lee Jones  wrote:

> On Tue, 24 Apr 2018, Kees Cook wrote:

> > On Mon, Apr 23, 2018 at 10:43 PM, Lee Jones 
wrote:
> > > On Mon, 23 Apr 2018, Kyle Spiers wrote:
> > >
> > >> As part of the effort to remove VLAs from the kernel[1], this creates
> > >> constants for the checksum lengths of CCITT and 8B2C and changes
> > >> crc_calculated to be the maximum size of a checksum.
> > >>
> > >> https://lkml.org/lkml/2018/3/7/621
> > >>
> > >> Signed-off-by: Kyle Spiers 
> > >> ---
> > >>  drivers/mfd/rave-sp.c | 11 +--
> > >>  1 file changed, 9 insertions(+), 2 deletions(-)
> > >>
> > >> diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
> > >> index 5c858e784a89..99fa482419f9 100644
> > >> --- a/drivers/mfd/rave-sp.c
> > >> +++ b/drivers/mfd/rave-sp.c
> > >> @@ -45,7 +45,9 @@
> > >>  #define RAVE_SP_DLE  0x10
> > >>
> > >>  #define RAVE_SP_MAX_DATA_SIZE64
> > >> -#define RAVE_SP_CHECKSUM_SIZE2  /* Worst case
scenario on RDU2 */
> > >> +#define RAVE_SP_CHECKSUM_8B2C1
> > >> +#define RAVE_SP_CHECKSUM_CCITT   2
> > >> +#define RAVE_SP_CHECKSUM_SIZERAVE_SP_CHECKSUM_CCITT
> > >>  /*
> > >>   * We don't store STX, ETX and unescaped bytes, so Rx is only
> > >>   * DATA + CSUM
> > >> @@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct
rave_sp *sp,
> > >>   const size_t payload_length  = length - checksum_length;
> > >>   const u8 *crc_reported   = [payload_length];
> > >>   struct device *dev   = >serdev->dev;
> > >> - u8 crc_calculated[checksum_length];
> > >> + u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
> > >> +
> > >> + if (unlikely(length > sizeof(crc_calculated))) {
> > >
> > > Forgive me if I have this wrong (it's still very early here), but this
> > > doesn't leave any room for the payload?
> > >
> > > <--   length -->
> > > <- payload length ->
> > > [CK][CK][D][A][T][A] .. [64]
> > >
> > > It is my hope that length would always be larger than the size of the
> > > checksum, or else there would never be any data?
> > >
> > > Should this not be:
> > >
> > > if (unlikely(length > RAVE_SP_MAX_DATA_SIZE))
> >
> > Oh, whoops, no, this should be:
> >
> > + if (unlikely(checksum_length > sizeof(crc_calculated))) {
> >
> > (To validate the VLA max size.)

> That doesn't match the OP's error message though:

>dev_warn(dev, "Dropping oversized frame\n");

> Which I assume is designed to complement the existing warning:

>if (unlikely(length <= checksum_length))
>dev_warn(dev, "Dropping short frame\n");

> --
> Lee Jones [李琼斯]
> Linaro Services Technical Lead
> Linaro.org │ Open source software for ARM SoCs
> Follow Linaro: Facebook | Twitter | Blog


[PATCH] rave-sp: Remove VLA

2018-04-23 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this creates
constants for the checksum lengths of CCITT and 8B2C and changes
crc_calculated to be the maximum size of a checksum.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
 drivers/mfd/rave-sp.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 5c858e784a89..99fa482419f9 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -45,7 +45,9 @@
 #define RAVE_SP_DLE0x10
 
 #define RAVE_SP_MAX_DATA_SIZE  64
-#define RAVE_SP_CHECKSUM_SIZE  2  /* Worst case scenario on RDU2 */
+#define RAVE_SP_CHECKSUM_8B2C  1
+#define RAVE_SP_CHECKSUM_CCITT 2
+#define RAVE_SP_CHECKSUM_SIZE  RAVE_SP_CHECKSUM_CCITT
 /*
  * We don't store STX, ETX and unescaped bytes, so Rx is only
  * DATA + CSUM
@@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
const size_t payload_length  = length - checksum_length;
const u8 *crc_reported   = &data[payload_length];
struct device *dev   = &sp->serdev->dev;
-   u8 crc_calculated[checksum_length];
+   u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
+
+   if (unlikely(length > sizeof(crc_calculated))) {
+   dev_warn(dev, "Dropping oversized frame\n");
+   return;
+   }
 
print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
   16, 1, data, length, false);
-- 
2.17.0.484.g0c8726318c-goog



[PATCH] rave-sp: Remove VLA

2018-04-23 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this creates
constants for the checksum lengths of CCITT and 8B2C and changes
crc_calculated to be the maximum size of a checksum.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 drivers/mfd/rave-sp.c | 11 +--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 5c858e784a89..99fa482419f9 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -45,7 +45,9 @@
 #define RAVE_SP_DLE0x10
 
 #define RAVE_SP_MAX_DATA_SIZE  64
-#define RAVE_SP_CHECKSUM_SIZE  2  /* Worst case scenario on RDU2 */
+#define RAVE_SP_CHECKSUM_8B2C  1
+#define RAVE_SP_CHECKSUM_CCITT 2
+#define RAVE_SP_CHECKSUM_SIZE  RAVE_SP_CHECKSUM_CCITT
 /*
  * We don't store STX, ETX and unescaped bytes, so Rx is only
  * DATA + CSUM
@@ -415,7 +417,12 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
const size_t payload_length  = length - checksum_length;
const u8 *crc_reported   = &data[payload_length];
struct device *dev   = &sp->serdev->dev;
-   u8 crc_calculated[checksum_length];
+   u8 crc_calculated[RAVE_SP_CHECKSUM_SIZE];
+
+   if (unlikely(length > sizeof(crc_calculated))) {
+   dev_warn(dev, "Dropping oversized frame\n");
+   return;
+   }
 
print_hex_dump(KERN_DEBUG, "rave-sp rx: ", DUMP_PREFIX_NONE,
   16, 1, data, length, false);
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v4] isofs compress: Remove VLA usage

2018-04-10 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
Fix sparc64 build
Fix Error recovery Mistakes
---
 fs/isofs/compress.c | 19 ---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..10205ececc27 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs) {
+   *errp = -ENOMEM;
+   return 0;
+   }
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +195,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages) {
+   unlock_page(page);
+   return -ENOMEM;
+   }
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v4] isofs compress: Remove VLA usage

2018-04-10 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
Fix sparc64 build
Fix Error recovery Mistakes
---
 fs/isofs/compress.c | 19 ---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..10205ececc27 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs) {
+   *errp = -ENOMEM;
+   return 0;
+   }
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +195,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +311,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +336,12 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages) {
+   unlock_page(page);
+   return -ENOMEM;
+   }
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +369,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v3] isofs compress: Remove VLA usage

2018-04-10 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
Fixed build for sparc64
Fixed error recovery mistakes
---
 fs/isofs/compress.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..4eba16bf173c 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +193,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +309,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +334,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +365,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v3] isofs compress: Remove VLA usage

2018-04-10 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
Fixed build for sparc64
Fixed error recovery mistakes
---
 fs/isofs/compress.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..4eba16bf173c 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +193,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +309,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +334,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +365,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v2] isofs compress: Remove VLA usage

2018-04-05 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
 fs/isofs/compress.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..4eba16bf173c 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +193,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +309,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +334,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +365,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v2] isofs compress: Remove VLA usage

2018-04-05 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 fs/isofs/compress.c | 15 ---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..4eba16bf173c 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include 
 
@@ -59,7 +60,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +81,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +193,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +309,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +334,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +365,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH] isofs compress: Remove VLA usage

2018-04-04 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <ksspi...@google.com>
---
 fs/isofs/compress.c | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..39cc99aecff8 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -59,7 +59,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +80,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +192,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +308,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +333,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +364,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH] isofs compress: Remove VLA usage

2018-04-04 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the bhs and pages arrays from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
of bhs.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 fs/isofs/compress.c | 14 +++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 9bb2fe35799d..39cc99aecff8 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -59,7 +59,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
>> bufshift;
int haveblocks;
blkcnt_t blocknum;
-   struct buffer_head *bhs[needblocks + 1];
+   struct buffer_head **bhs;
int curbh, curpage;
 
if (block_size > deflateBound(1UL << zisofs_block_shift)) {
@@ -80,7 +80,9 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 
/* Because zlib is not thread-safe, do all the I/O at the top. */
blocknum = block_start >> bufshift;
-   memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
+   bhs = kcalloc(needblocks + 1, sizeof(*bhs), GFP_KERNEL);
+   if (!bhs)
+   return -ENOMEM;
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
 
@@ -190,6 +192,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, 
loff_t block_start,
 b_eio:
for (i = 0; i < haveblocks; i++)
brelse(bhs[i]);
+   kfree(bhs);
return stream.total_out;
 }
 
@@ -305,7 +308,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
unsigned int zisofs_pages_per_cblock =
PAGE_SHIFT <= zisofs_block_shift ?
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0;
-   struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)];
+   struct page **pages;
pgoff_t index = page->index, end_index;
 
end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -330,6 +333,10 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
full_page = 0;
pcount = 1;
}
+   pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1),
+   sizeof(*pages), GFP_KERNEL);
+   if (!pages)
+   return -ENOMEM;
pages[full_page] = page;
 
for (i = 0; i < pcount; i++, index++) {
@@ -357,6 +364,7 @@ static int zisofs_readpage(struct file *file, struct page 
*page)
}   
 
/* At this point, err contains 0 or -EIO depending on the "critical" 
page */
+   kfree(pages);
return err;
 }
 
-- 
2.17.0.484.g0c8726318c-goog



[PATCH v2] pcm_native: Remove VLA usage

2018-03-28 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the rstamps array from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
loop.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <k...@spiers.me>
---
 sound/core/pcm_native.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50d..db5e3c5 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -323,7 +323,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
struct snd_pcm_hw_constraints *constrs =
>runtime->hw_constraints;
unsigned int k;
-   unsigned int rstamps[constrs->rules_num];
+   unsigned int *rstamps;
unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
unsigned int stamp;
struct snd_pcm_hw_rule *r;
@@ -339,8 +339,8 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 * Each member of 'rstamps' array represents the sequence number of
 * recent application of corresponding rule.
 */
-   for (k = 0; k < constrs->rules_num; k++)
-   rstamps[k] = 0;
+
+   rstamps = kcalloc(constrs->rules_num, sizeof(*rstamps), GFP_KERNEL);
 
/*
 * Each member of 'vstamps' array represents the sequence number of
@@ -398,8 +398,10 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
}
 
changed = r->func(params, r);
-   if (changed < 0)
+   if (changed < 0) {
+   kfree(rstamps);
return changed;
+   }
 
/*
 * When the parameter is changed, notify it to the caller
@@ -430,6 +432,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
if (again)
goto retry;
 
+   kfree(rstamps);
return 0;
 }
 
-- 
2.7.4



[PATCH v2] pcm_native: Remove VLA usage

2018-03-28 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the rstamps array from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
loop.

Signed-off-by: Kyle Spiers 
---
 sound/core/pcm_native.c | 11 +++
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50d..db5e3c5 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -323,7 +323,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
struct snd_pcm_hw_constraints *constrs =
>runtime->hw_constraints;
unsigned int k;
-   unsigned int rstamps[constrs->rules_num];
+   unsigned int *rstamps;
unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
unsigned int stamp;
struct snd_pcm_hw_rule *r;
@@ -339,8 +339,8 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 * Each member of 'rstamps' array represents the sequence number of
 * recent application of corresponding rule.
 */
-   for (k = 0; k < constrs->rules_num; k++)
-   rstamps[k] = 0;
+
+   rstamps = kcalloc(constrs->rules_num, sizeof(*rstamps), GFP_KERNEL);
 
/*
 * Each member of 'vstamps' array represents the sequence number of
@@ -398,8 +398,10 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
}
 
changed = r->func(params, r);
-   if (changed < 0)
+   if (changed < 0) {
+   kfree(rstamps);
return changed;
+   }
 
/*
 * When the parameter is changed, notify it to the caller
@@ -430,6 +432,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
if (again)
goto retry;
 
+   kfree(rstamps);
return 0;
 }
 
-- 
2.7.4



[PATCH] pcm_native: Remove VLA usage

2018-03-28 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the rstamps array from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
loop.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <k...@spiers.me>
---
 sound/core/pcm_native.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50d..57240b8 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -323,7 +323,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
struct snd_pcm_hw_constraints *constrs =
>runtime->hw_constraints;
unsigned int k;
-   unsigned int rstamps[constrs->rules_num];
+   unsigned int *rstamps;
unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
unsigned int stamp;
struct snd_pcm_hw_rule *r;
@@ -339,8 +339,8 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 * Each member of 'rstamps' array represents the sequence number of
 * recent application of corresponding rule.
 */
-   for (k = 0; k < constrs->rules_num; k++)
-   rstamps[k] = 0;
+
+   rstamps = kcalloc(constrs->rules_num, sizeof(*rstamps), GFP_KERNEL);
 
/*
 * Each member of 'vstamps' array represents the sequence number of
@@ -399,6 +399,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 
changed = r->func(params, r);
if (changed < 0)
+   kfree(rstamps);
return changed;
 
/*
@@ -430,6 +431,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
if (again)
goto retry;
 
+   kfree(rstamps);
return 0;
 }
 
-- 
2.7.4



[PATCH] pcm_native: Remove VLA usage

2018-03-28 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this changes
the allocation of the rstamps array from being on the stack to being
kcalloc()ed. This also allows for the removal of the explicit zeroing
loop.

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 sound/core/pcm_native.c | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 77ba50d..57240b8 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -323,7 +323,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
struct snd_pcm_hw_constraints *constrs =
>runtime->hw_constraints;
unsigned int k;
-   unsigned int rstamps[constrs->rules_num];
+   unsigned int *rstamps;
unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
unsigned int stamp;
struct snd_pcm_hw_rule *r;
@@ -339,8 +339,8 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 * Each member of 'rstamps' array represents the sequence number of
 * recent application of corresponding rule.
 */
-   for (k = 0; k < constrs->rules_num; k++)
-   rstamps[k] = 0;
+
+   rstamps = kcalloc(constrs->rules_num, sizeof(*rstamps), GFP_KERNEL);
 
/*
 * Each member of 'vstamps' array represents the sequence number of
@@ -399,6 +399,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
 
changed = r->func(params, r);
if (changed < 0)
+   kfree(rstamps);
return changed;
 
/*
@@ -430,6 +431,7 @@ static int constrain_params_by_rules(struct 
snd_pcm_substream *substream,
if (again)
goto retry;
 
+   kfree(rstamps);
return 0;
 }
 
-- 
2.7.4



[PATCH v3] rbd: Remove VLA usage

2018-03-17 Thread Kyle Spiers
 
As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <k...@spiers.me>
---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..46893e0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device 
*rbd_dev,
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-   int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
void *p = buf;
 
dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct rbd_device 
*rbd_dev,
 u64 notify_id, u64 cookie, s32 *result)
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-   int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
int ret;
 
if (result) {
-- 
2.7.4



[PATCH v3] rbd: Remove VLA usage

2018-03-17 Thread Kyle Spiers
 
As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..46893e0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device 
*rbd_dev,
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-   int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
void *p = buf;
 
dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct rbd_device 
*rbd_dev,
 u64 notify_id, u64 cookie, s32 *result)
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-   int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
int ret;
 
if (result) {
-- 
2.7.4



[PATCH v2] rbd: Remove VLA usage

2018-03-15 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <k...@spiers.me>
---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..0e94e1f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device 
*rbd_dev,
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-   int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf)
void *p = buf;
 
dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct rbd_device 
*rbd_dev,
 u64 notify_id, u64 cookie, s32 *result)
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-   int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
int ret;
 
if (result) {
-- 
2.7.4




[PATCH v2] rbd: Remove VLA usage

2018-03-15 Thread Kyle Spiers
As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 
---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..0e94e1f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device 
*rbd_dev,
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-   int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf)
void *p = buf;
 
dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct rbd_device 
*rbd_dev,
 u64 notify_id, u64 cookie, s32 *result)
 {
struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-   int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-   char buf[buf_size];
+   char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+   int buf_size = sizeof(buf);
int ret;
 
if (result) {
-- 
2.7.4




[PATCH] rbd: Remove VLA usage

2018-03-09 Thread Kyle Spiers
>From 4198ebe2e8058ff676d8e2f993d8806d6ca29c11 Mon Sep 17 00:00:00 2001
From: Kyle Spiers <k...@spiers.me>
Date: Fri, 9 Mar 2018 12:34:15 -0800
Subject: [PATCH] rbd: Remove VLA usage

As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers <k...@spiers.me>

---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..0e94e1f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device
*rbd_dev,
 {
 struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-    int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-    char buf[buf_size];
+    char buf[4 + 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+    int buf_size = sizeof(buf);
 void *p = buf;
 
 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct
rbd_device *rbd_dev,
              u64 notify_id, u64 cookie, s32 *result)
 {
 struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-    int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-    char buf[buf_size];
+    char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+    int buf_size = sizeof(buf);
 int ret;
 
 if (result) {
-- 2.7.4



[PATCH] rbd: Remove VLA usage

2018-03-09 Thread Kyle Spiers
>From 4198ebe2e8058ff676d8e2f993d8806d6ca29c11 Mon Sep 17 00:00:00 2001
From: Kyle Spiers 
Date: Fri, 9 Mar 2018 12:34:15 -0800
Subject: [PATCH] rbd: Remove VLA usage

As part of the effort to remove VLAs from the kernel[1], this moves
the literal values into the stack array calculation instead of using a
variable for the sizing. The resulting size can be found from
sizeof(buf).

[1] https://lkml.org/lkml/2018/3/7/621

Signed-off-by: Kyle Spiers 

---
 drivers/block/rbd.c | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8e40da0..0e94e1f 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3100,8 +3100,8 @@ static int __rbd_notify_op_lock(struct rbd_device
*rbd_dev,
 {
 struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
-    int buf_size = 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN;
-    char buf[buf_size];
+    char buf[4 + 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
+    int buf_size = sizeof(buf);
 void *p = buf;
 
 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
@@ -3619,8 +3619,8 @@ static void __rbd_acknowledge_notify(struct
rbd_device *rbd_dev,
              u64 notify_id, u64 cookie, s32 *result)
 {
 struct ceph_osd_client *osdc = _dev->rbd_client->client->osdc;
-    int buf_size = 4 + CEPH_ENCODING_START_BLK_LEN;
-    char buf[buf_size];
+    char buf[4 + CEPH_ENCODING_START_BLK_LEN];
+    int buf_size = sizeof(buf);
 int ret;
 
 if (result) {
-- 2.7.4