This patch fixes a double empty line issue reported by checkpatch.
While at it, since the maximum line length is now 100, rejoin some
lines that were previously wrapped.

Signed-off-by: Corentin Labbe <cla...@baylibre.com>
---
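Note: purely as an illustration, the checkpatch findings can typically be
reproduced by running it directly on the touched files; the exact invocation
below is an example and not part of the original report:

  ./scripts/checkpatch.pl --strict -f \
        drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c \
        drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c

The 100-column limit is checkpatch's current default and can also be set
explicitly with --max-line-length=100.
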
 .../allwinner/sun8i-ce/sun8i-ce-cipher.c      | 34 ++++++-------------
 .../crypto/allwinner/sun8i-ce/sun8i-ce-core.c |  9 ++---
 2 files changed, 14 insertions(+), 29 deletions(-)

diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 2dcf508b0f18..9dae2be26e48 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -164,12 +164,10 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
                                goto theend_key;
                        }
                        offset = areq->cryptlen - ivsize;
-                       scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
-                                                offset, ivsize, 0);
+                       scatterwalk_map_and_copy(rctx->backup_iv, areq->src, offset, ivsize, 0);
                }
                memcpy(rctx->bounce_iv, areq->iv, ivsize);
-               addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
-                                        DMA_TO_DEVICE);
+               addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen, DMA_TO_DEVICE);
                cet->t_iv = cpu_to_le32(addr_iv);
                if (dma_mapping_error(ce->dev, addr_iv)) {
                        dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -179,8 +177,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
        }
 
        if (areq->src == areq->dst) {
-               nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
-                                   DMA_BIDIRECTIONAL);
+               nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_BIDIRECTIONAL);
                if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
                        err = -EINVAL;
@@ -188,15 +185,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
                }
                nr_sgd = nr_sgs;
        } else {
-               nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
-                                   DMA_TO_DEVICE);
+               nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
                if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
                        err = -EINVAL;
                        goto theend_iv;
                }
-               nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
-                                   DMA_FROM_DEVICE);
+               nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst), DMA_FROM_DEVICE);
                if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
                        dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
                        err = -EINVAL;
@@ -251,15 +246,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
 theend_iv:
        if (areq->iv && ivsize > 0) {
                if (addr_iv)
-                       dma_unmap_single(ce->dev, addr_iv, rctx->ivlen,
-                                        DMA_TO_DEVICE);
+                       dma_unmap_single(ce->dev, addr_iv, rctx->ivlen, DMA_TO_DEVICE);
                offset = areq->cryptlen - ivsize;
                if (rctx->op_dir & CE_DECRYPTION) {
                        memcpy(areq->iv, rctx->backup_iv, ivsize);
                        kfree_sensitive(rctx->backup_iv);
                } else {
-                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
-                                                ivsize, 0);
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0);
                }
                kfree(rctx->bounce_iv);
        }
@@ -315,15 +308,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
 
        if (areq->iv && ivsize > 0) {
                if (cet->t_iv)
-                       dma_unmap_single(ce->dev, cet->t_iv, rctx->ivlen,
-                                        DMA_TO_DEVICE);
+                       dma_unmap_single(ce->dev, cet->t_iv, rctx->ivlen, DMA_TO_DEVICE);
                offset = areq->cryptlen - ivsize;
                if (rctx->op_dir & CE_DECRYPTION) {
                        memcpy(areq->iv, rctx->backup_iv, ivsize);
                        kfree_sensitive(rctx->backup_iv);
                } else {
-                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
-                                                ivsize, 0);
+                       scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0);
                }
                kfree(rctx->bounce_iv);
        }
@@ -395,7 +386,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
        sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
                         crypto_skcipher_reqsize(op->fallback_tfm);
 
-
        dev_info(op->ce->dev, "Fallback for %s is %s\n",
                 crypto_tfm_alg_driver_name(&sktfm->base),
                 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
@@ -427,8 +417,7 @@ void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
        pm_runtime_put_sync_suspend(op->ce->dev);
 }
 
-int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                       unsigned int keylen)
+int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
 {
        struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun8i_ce_dev *ce = op->ce;
@@ -459,8 +448,7 @@ int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
 }
 
-int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                        unsigned int keylen)
+int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
 {
        struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index cf320898a4b1..1dbbd40ad576 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -596,8 +596,7 @@ static int sun8i_ce_dbgfs_read(struct seq_file *seq, void *v)
                }
        }
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
-       seq_printf(seq, "HWRNG %lu %lu\n",
-                  ce->hwrng_stat_req, ce->hwrng_stat_bytes);
+       seq_printf(seq, "HWRNG %lu %lu\n", ce->hwrng_stat_req, ce->hwrng_stat_bytes);
 #endif
        return 0;
 }
@@ -635,8 +634,7 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
 {
        int i, err;
 
-       ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW,
-                                   sizeof(struct sun8i_ce_flow), GFP_KERNEL);
+       ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW, sizeof(struct sun8i_ce_flow), GFP_KERNEL);
        if (!ce->chanlist)
                return -ENOMEM;
 
@@ -935,8 +933,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
        if (err)
                goto error_pm;
 
-       err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0,
-                              "sun8i-ce-ns", ce);
+       err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "sun8i-ce-ns", ce);
        if (err) {
                dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err);
                goto error_irq;
-- 
2.26.2
