[PATCH] arm64/lib: add optimized implementation of sha_transform
This implementation keeps the 64 bytes of workspace in registers rather than on the stack, eliminating most of the loads and stores, and reducing the instruction count by about 25%. Signed-off-by: Ard Biesheuvel ard.biesheu...@linaro.org --- Hello all, No performance numbers I am allowed to share, unfortunately, so if anyone else (with access to actual, representative hardware) would care to have a go, I would be very grateful. This can be done by building the tcrypt.ko module (CONFIG_CRYPTO_TEST=m), and inserting the module using 'mode=303' as a parameter (note that the insmod always fails, but produces its test output to the kernel log). Also note that the sha_transform() function will be part of the kernel proper, so just rebuilding the sha1_generic module is not sufficient. Cheers, arch/arm64/kernel/arm64ksyms.c | 3 + arch/arm64/lib/Makefile| 2 +- arch/arm64/lib/sha1.S | 256 + 3 files changed, 260 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/lib/sha1.S diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 338b568cd8ae..1f5693fb5d93 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -56,3 +56,6 @@ EXPORT_SYMBOL(clear_bit); EXPORT_SYMBOL(test_and_clear_bit); EXPORT_SYMBOL(change_bit); EXPORT_SYMBOL(test_and_change_bit); + + /* SHA-1 implementation under lib/ */ +EXPORT_SYMBOL(sha_transform); diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 328ce1a99daa..ea093ebb9a9a 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -1,4 +1,4 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \ copy_to_user.o copy_in_user.o copy_page.o\ clear_page.o memchr.o memcpy.o memmove.o memset.o\ - strchr.o strrchr.o + strchr.o strrchr.o sha1.o diff --git a/arch/arm64/lib/sha1.S b/arch/arm64/lib/sha1.S new file mode 100644 index ..877b8d70e992 --- /dev/null +++ b/arch/arm64/lib/sha1.S @@ -0,0 +1,256 @@ +/* + * linux/arch/arm64/lib/sha1.S + * + * Copyright (C) 2014 
Linaro Ltd ard.biesheu...@linaro.org + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include linux/linkage.h +#include asm/assembler.h + + .text + + k .reqw1 + + res .reqw2 + xres.reqx2 + + wA .reqw3 + wB .reqw4 + wC .reqw5 + wD .reqw6 + wE .reqw7 + + tmp .reqw16 + xtmp.reqx16 + + .macro sha1_choose, out, b, c, d + eor \out, \c, \d + and \out, \out, \b + eor \out, \out, \d + .endm + + .macro sha1_parity, out, b, c, d + eor \out, \b, \c + eor \out, \out, \d + .endm + + .macro sha1_majority, out, b, c, d + eor tmp, \b, \c + and \out, \b, \c + and tmp, tmp, \d + add \out, \out, tmp + .endm + + .macro mix_state, st0, st1, st4, st6, st7 + extrxtmp, \st7, \st6, #32 + eor \st0, \st0, \st1 + eor xtmp, xtmp, \st4 + eor xtmp, xtmp, \st0 + ror res, tmp, #(32 - 1) + lsr xtmp, xtmp, #32 + ror tmp, tmp, #(32 - 1) + orr \st0, xres, xtmp, lsl #32 + .endm + + .macro sha1_round, func, r, h, a, b, c, d, e + sha1_\func res, \b, \c, \d + add res, res, \e + ror \e, \a, #(32 - 5) + .ifc\h, h + add xres, xres, x\r, lsr #32 + .else + add res, res, w\r + .endif + add \e, \e, k + ror \b, \b, #2 + add \e, \e, res + .endm + + /* +* void sha_transform(__u32 *digest, const char *data, __u32 *array) +*/ +ENTRY(sha_transform) + /* load input into state array */ + ldp x8, x9, [x1] + ldp x10, x11, [x1, #16] + ldp x12, x13, [x1, #32] + ldp x14, x15, [x1, #48] + + /* load digest input */ + ldr wA, [x0] + ldp wB, wC, [x0, #4] + ldp wD, wE, [x0, #12] + + /* endian-reverse the input on LE builds */ +CPU_LE( rev32 x8, x8 ) +CPU_LE( rev32 x9, x9 ) +CPU_LE( rev32 x10, x10) +CPU_LE( rev32 x11, x11) +CPU_LE( rev32
[PATCH cryptodev 4/4] crypto: caam - add support for aead null encryption
Add support for the following combinations: -encryption: null -authentication: md5, sha* (1, 224, 256, 384, 512) Signed-off-by: Tudor Ambarus tudor.amba...@freescale.com Signed-off-by: Horia Geanta horia.gea...@freescale.com --- drivers/crypto/caam/caamalg.c | 327 +- drivers/crypto/caam/compat.h | 1 + drivers/crypto/caam/desc_constr.h | 27 ++-- 3 files changed, 342 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 5016e63b6c25..a9ba8b159636 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -70,6 +70,10 @@ #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_BASE(3 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ) +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ) + #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_ENC_LEN(DESC_ABLKCIPHER_BASE + \ 20 * CAAM_CMD_SZ) @@ -109,9 +113,9 @@ static inline void append_dec_op1(u32 *desc, u32 type) */ static inline void aead_append_src_dst(u32 *desc, u32 msg_type) { + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } /* @@ -200,6 +204,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, set_jump_tgt_here(desc, key_jump_cmd); } +static int aead_null_set_sh_desc(struct crypto_aead *aead) +{ + struct aead_tfm *tfm = aead-base.crt_aead; + struct caam_ctx *ctx = crypto_aead_ctx(aead); + struct device *jrdev = ctx-jrdev; + bool keys_fit_inline = false; + u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd; + u32 *desc; + + /* +* Job Descriptor and Shared Descriptors +* must all fit into the 64-word Descriptor h/w Buffer +*/ + if 
(DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN + + ctx-split_key_pad_len = CAAM_DESC_BYTES_MAX) + keys_fit_inline = true; + + /* aead_encrypt shared descriptor */ + desc = ctx-sh_desc_enc; + + init_sh_desc(desc, HDR_SHARE_SERIAL); + + /* Skip if already shared */ + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | + JUMP_COND_SHRD); + if (keys_fit_inline) + append_key_as_imm(desc, ctx-key, ctx-split_key_pad_len, + ctx-split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + else + append_key(desc, ctx-key_dma, ctx-split_key_len, CLASS_2 | + KEY_DEST_MDHA_SPLIT | KEY_ENC); + set_jump_tgt_here(desc, key_jump_cmd); + + /* cryptlen = seqoutlen - authsize */ + append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx-authsize); + + /* +* NULL encryption; IV is zero +* assoclen = (assoclen + cryptlen) - cryptlen +*/ + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); + + /* read assoc before reading payload */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | +KEY_VLF); + + /* Prepare to read and write cryptlen bytes */ + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); + + /* +* MOVE_LEN opcode is not available in all SEC HW revisions, +* thus need to do some magic, i.e. self-patch the descriptor +* buffer. 
+*/ + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | + MOVE_DEST_MATH3 | + (0x6 MOVE_LEN_SHIFT)); + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | +MOVE_DEST_DESCBUF | +MOVE_WAITCOMP | +(0x8 MOVE_LEN_SHIFT)); + + /* Class 2 operation */ + append_operation(desc, ctx-class2_alg_type | +OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); + + /* Read and write cryptlen bytes */ + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); + + set_move_tgt_here(desc, read_move_cmd); + set_move_tgt_here(desc, write_move_cmd); + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | + MOVE_AUX_LS); + + /* Write ICV */ +
[PATCH cryptodev 2/4] crypto: export NULL algorithms defines
These defines might be needed by crypto drivers. Signed-off-by: Horia Geanta horia.gea...@freescale.com --- crypto/crypto_null.c | 6 +- include/crypto/null.h | 11 +++ 2 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 include/crypto/null.h diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index fee7265cd35d..1dc54bb95a87 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,6 +17,7 @@ * */ +#include <crypto/null.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <linux/init.h> @@ -24,11 +25,6 @@ #include <linux/mm.h> #include <linux/string.h> -#define NULL_KEY_SIZE 0 -#define NULL_BLOCK_SIZE 1 -#define NULL_DIGEST_SIZE 0 -#define NULL_IV_SIZE 0 - static int null_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { diff --git a/include/crypto/null.h b/include/crypto/null.h new file mode 100644 index ..b7c864cc70df --- /dev/null +++ b/include/crypto/null.h @@ -0,0 +1,11 @@ +/* Values for NULL algorithms */ + +#ifndef _CRYPTO_NULL_H +#define _CRYPTO_NULL_H + +#define NULL_KEY_SIZE 0 +#define NULL_BLOCK_SIZE 1 +#define NULL_DIGEST_SIZE 0 +#define NULL_IV_SIZE 0 + +#endif -- 1.8.3.1 -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
[PATCH cryptodev 1/4] crypto: caam - remove error propagation handling
Commit 61bb86bba169507a5f223b94b9176c32c84b4721 (crypto: caam - set descriptor sharing type to SERIAL) changed the descriptor sharing mode from SHARE_WAIT to SHARE_SERIAL. All descriptor commands that handle the ok to share and error propagation settings should also go away, since they have no meaning for SHARE_SERIAL. Signed-off-by: Horia Geanta horia.gea...@freescale.com --- drivers/crypto/caam/caamalg.c | 54 ++- 1 file changed, 7 insertions(+), 47 deletions(-) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b71f2fd749df..5016e63b6c25 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -66,8 +66,8 @@ /* length of descriptors text */ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ) -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ) -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ) +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ) +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ) #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ) #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ) @@ -104,19 +104,6 @@ static inline void append_dec_op1(u32 *desc, u32 type) } /* - * Wait for completion of class 1 key loading before allowing - * error propagation - */ -static inline void append_dec_shr_done(u32 *desc) -{ - u32 *jump_cmd; - - jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL); - set_jump_tgt_here(desc, jump_cmd); - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); -} - -/* * For aead functions, read payload and write payload, * both of which are specified in req-src and req-dst */ @@ -211,9 +198,6 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, append_key_aead(desc, ctx, keys_fit_inline); set_jump_tgt_here(desc, key_jump_cmd); - - /* Propagate errors from shared to job descriptor */ - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); } static int aead_set_sh_desc(struct crypto_aead *aead) @@ -222,7 +206,6 @@ static 
int aead_set_sh_desc(struct crypto_aead *aead) struct caam_ctx *ctx = crypto_aead_ctx(aead); struct device *jrdev = ctx-jrdev; bool keys_fit_inline = false; - u32 *key_jump_cmd, *jump_cmd; u32 geniv, moveiv; u32 *desc; @@ -253,7 +236,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* assoclen + cryptlen = seqinlen - ivsize */ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm-ivsize); - /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */ + /* assoclen = (assoclen + cryptlen) - cryptlen */ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); /* read assoc before reading payload */ @@ -296,28 +279,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead) CAAM_DESC_BYTES_MAX) keys_fit_inline = true; - desc = ctx-sh_desc_dec; - /* aead_decrypt shared descriptor */ - init_sh_desc(desc, HDR_SHARE_SERIAL); - - /* Skip if already shared */ - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | - JUMP_COND_SHRD); - - append_key_aead(desc, ctx, keys_fit_inline); + desc = ctx-sh_desc_dec; - /* Only propagate error immediately if shared */ - jump_cmd = append_jump(desc, JUMP_TEST_ALL); - set_jump_tgt_here(desc, key_jump_cmd); - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); - set_jump_tgt_here(desc, jump_cmd); + init_sh_desc_key_aead(desc, ctx, keys_fit_inline); /* Class 2 operation */ append_operation(desc, ctx-class2_alg_type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); - /* assoclen + cryptlen = seqinlen - ivsize */ + /* assoclen + cryptlen = seqinlen - ivsize - authsize */ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx-authsize + tfm-ivsize) /* assoclen = (assoclen + cryptlen) - cryptlen */ @@ -340,7 +311,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead) /* Load ICV */ append_seq_fifo_load(desc, ctx-authsize, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); - append_dec_shr_done(desc); ctx-sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), @@ -532,7 +502,7 @@ static int 
ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, struct ablkcipher_tfm *tfm = ablkcipher-base.crt_ablkcipher; struct device *jrdev = ctx-jrdev; int ret = 0; - u32 *key_jump_cmd, *jump_cmd; + u32 *key_jump_cmd; u32 *desc; #ifdef DEBUG @@ -563,9 +533,6 @@ static int
[PATCH cryptodev 3/4] crypto: testmgr - add aead null encryption test vectors
Add test vectors for aead with null encryption and md5, respectively sha1 authentication. Input data is taken from test vectors listed in RFC2410. Signed-off-by: Horia Geanta horia.gea...@freescale.com --- crypto/tcrypt.c | 8 +++ crypto/testmgr.c | 32 ++ crypto/testmgr.h | 180 +++ 3 files changed, 220 insertions(+) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 0d9003ae8c61..870be7b4dc05 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -1511,6 +1511,14 @@ static int do_test(int m) ret += tcrypt_test(authenc(hmac(sha1),cbc(aes))); break; + case 156: + ret += tcrypt_test(authenc(hmac(md5),ecb(cipher_null))); + break; + + case 157: + ret += tcrypt_test(authenc(hmac(sha1),ecb(cipher_null))); + break; + case 200: test_cipher_speed(ecb(aes), ENCRYPT, sec, NULL, 0, speed_template_16_24_32); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 77955507f6f1..dc3cf3535ef0 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1809,6 +1809,22 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = authenc(hmac(md5),ecb(cipher_null)), + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = { + .enc = { + .vecs = hmac_md5_ecb_cipher_null_enc_tv_template, + .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS + }, + .dec = { + .vecs = hmac_md5_ecb_cipher_null_dec_tv_template, + .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS + } + } + } + }, { .alg = authenc(hmac(sha1),cbc(aes)), .test = alg_test_aead, .fips_allowed = 1, @@ -1821,6 +1837,22 @@ static const struct alg_test_desc alg_test_descs[] = { } } }, { + .alg = authenc(hmac(sha1),ecb(cipher_null)), + .test = alg_test_aead, + .fips_allowed = 1, + .suite = { + .aead = { + .enc = { + .vecs = hmac_sha1_ecb_cipher_null_enc_tv_template, + .count = HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS + }, + .dec = { + .vecs = hmac_sha1_ecb_cipher_null_dec_tv_template, + .count = HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS + } + } + } + }, { .alg = authenc(hmac(sha256),cbc(aes)), .test = 
alg_test_aead, .fips_allowed = 1, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index 7d44aa3d6b44..3db83dbba1d9 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -12821,6 +12821,10 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = { #define AES_DEC_TEST_VECTORS 4 #define AES_CBC_ENC_TEST_VECTORS 5 #define AES_CBC_DEC_TEST_VECTORS 5 +#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2 +#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2 #define HMAC_SHA1_AES_CBC_ENC_TEST_VECTORS 7 #define HMAC_SHA256_AES_CBC_ENC_TEST_VECTORS 7 #define HMAC_SHA512_AES_CBC_ENC_TEST_VECTORS 7 @@ -13627,6 +13631,90 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = { }, }; +static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { + { /* Input data from RFC 2410 Case 1 */ +#ifdef __LITTLE_ENDIAN + .key= \x08\x00/* rta length */ + \x01\x00/* rta type */ +#else + .key= \x00\x08/* rta length */ + \x00\x01/* rta type */ +#endif + \x00\x00\x00\x00/* enc key length */ + \x00\x00\x00\x00\x00\x00\x00\x00 + \x00\x00\x00\x00\x00\x00\x00\x00, + .klen = 8 + 16 + 0, + .iv = , + .input = \x01\x23\x45\x67\x89\xab\xcd\xef, + .ilen = 8, + .result = \x01\x23\x45\x67\x89\xab\xcd\xef + \xaa\x42\xfe\x43\x8d\xea\xa3\x5a + \xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae, + .rlen = 8 + 16, + }, {
[PATCH crypto] crypto: caam - add missing key_dma unmap
(struct caam_ctx) ctx->key_dma needs to be unmapped when context is cleaned up. Signed-off-by: Horia Geanta horia.gea...@freescale.com --- drivers/crypto/caam/caamalg.c | 5 + 1 file changed, 5 insertions(+) diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b71f2fd749df..f44cfe065eb3 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -2099,6 +2099,11 @@ static void caam_cra_exit(struct crypto_tfm *tfm) dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma, desc_bytes(ctx->sh_desc_givenc), DMA_TO_DEVICE); + if (ctx->key_dma && + !dma_mapping_error(ctx->jrdev, ctx->key_dma)) + dma_unmap_single(ctx->jrdev, ctx->key_dma, +ctx->enckeylen + ctx->split_key_pad_len, +DMA_TO_DEVICE); caam_jr_free(ctx->jrdev); } -- 1.8.3.1 -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
algif for compression?
I see the algif_hash and algif_blkcipher implementations to allow userspace AF_ALG socket access to kernel blkcipher and hash algorithms, but has anyone done an algif_compression to allow userspace access to compression algs? I'm asking specifically wrt the 842 crypto module, which uses the hardware compression coprocessors on newer powerpc systems. If not, is there any reason against adding an algif_compression to provide the access? Thanks! -- To unsubscribe from this list: send the line unsubscribe linux-crypto in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html