3.16.7-ckt23 -stable review patch.  If anyone has any objections, please let me 
know.

---8<------------------------------------------------------------

From: NeilBrown <ne...@suse.com>

commit b02bab6b0f928d49dbfb03e1e4e9dd43647623d7 upstream.

These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
So use GFP_NOWAIT rather than GFP_NOIO.
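
For context, a minimal sketch of the calling pattern (illustrative
only, not the actual md/raid5 code; the helper name is made up):
get_cpu() disables preemption, so everything up to the matching
put_cpu() runs in atomic context, where a GFP_NOIO allocation may
sleep on reclaim while GFP_NOWAIT fails fast:

        #include <linux/dmaengine.h>
        #include <linux/printk.h>
        #include <linux/smp.h>

        static void sketch_atomic_caller(struct dma_device *device)
        {
                struct dmaengine_unmap_data *unmap;
                int cpu = get_cpu();    /* preemption disabled from here */

                /* raid5 uses the cpu number to pick per-cpu scratch space */
                pr_debug("issuing on cpu %d\n", cpu);

                /* sleeping here would be a bug, hence GFP_NOWAIT */
                unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
                if (unmap)
                        dmaengine_unmap_put(unmap);     /* drop the reference */
                /* else: callers fall back to the synchronous CPU path */

                put_cpu();      /* preemption enabled again */
        }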

Dan Williams writes: Longer term async_tx needs to be merged into md
directly as we can allocate this unmap data statically per-stripe
rather than per request.

Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Reported-and-tested-by: Stanislav Samsonov <sl...@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.willi...@intel.com>
Signed-off-by: NeilBrown <ne...@suse.com>
Signed-off-by: Vinod Koul <vinod.k...@intel.com>
Signed-off-by: Luis Henriques <luis.henriq...@canonical.com>
---
 crypto/async_tx/async_memcpy.c      | 2 +-
 crypto/async_tx/async_pq.c          | 4 ++--
 crypto/async_tx/async_raid6_recov.c | 4 ++--
 crypto/async_tx/async_xor.c         | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
        struct dmaengine_unmap_data *unmap = NULL;
 
        if (device)
-               unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
        if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
                unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index d05327caf69d..7eb264e65267 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
        BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
        if (device)
-               unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
        if (unmap &&
            (src_cnt <= dma_maxpq(device, 0) ||
@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
        BUG_ON(disks < 4);
 
        if (device)
-               unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
        if (unmap && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
        u8 *a, *b, *c;
 
        if (dma)
-               unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
        if (unmap) {
                struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
        u8 *d, *s;
 
        if (dma)
-               unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
        if (unmap) {
                dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
        BUG_ON(src_cnt <= 1);
 
        if (device)
-               unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
        if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
                struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
        BUG_ON(src_cnt <= 1);
 
        if (device)
-               unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+               unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
        if (unmap && src_cnt <= device->max_xor &&
            is_dma_xor_aligned(device, offset, 0, len)) {
