[PATCH v2 2/7] crypto:chtls: key len correction

2018-05-01 Thread Atul Gupta
Corrected the key length to copy a 128-bit key. Removed the 192-bit and
256-bit key cases, since user input only supports 128-bit keys in gcm_ctx.

Reported-by: Dan Carpenter 
Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls_hw.c | 6 +-
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c 
b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 54a13aa9..55d5014 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -213,7 +213,7 @@ static int chtls_key_info(struct chtls_sock *csk,
  struct _key_ctx *kctx,
  u32 keylen, u32 optname)
 {
-   unsigned char key[CHCR_KEYCTX_CIPHER_KEY_SIZE_256];
+   unsigned char key[AES_KEYSIZE_128];
struct tls12_crypto_info_aes_gcm_128 *gcm_ctx;
unsigned char ghash_h[AEAD_H_SIZE];
struct crypto_cipher *cipher;
@@ -228,10 +228,6 @@ static int chtls_key_info(struct chtls_sock *csk,
 
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
-   } else if (keylen == AES_KEYSIZE_192) {
-   ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
-   } else if (keylen == AES_KEYSIZE_256) {
-   ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
pr_err("GCM: Invalid key length %d\n", keylen);
return -EINVAL;
-- 
1.8.3.1



[PATCH v2 4/7] crypto: chtls: kbuild warnings

2018-05-01 Thread Atul Gupta
- fixed the indentation of a continue statement
- added a check for a NULL page
- changed the TLS header read helper to a signed return type

Reported-by: Dan Carpenter 
Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls_io.c | 11 ++-
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c 
b/drivers/crypto/chelsio/chtls/chtls_io.c
index 85ddc07..0d2e7e7 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -907,11 +907,11 @@ static int chtls_skb_copy_to_page_nocache(struct sock *sk,
 }
 
 /* Read TLS header to find content type and data length */
-static u16 tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
+static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
 {
if (copy_from_iter(thdr, sizeof(*thdr), from) != sizeof(*thdr))
return -EFAULT;
-   return (__force u16)cpu_to_be16(thdr->length);
+   return (__force int)cpu_to_be16(thdr->length);
 }
 
 static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
@@ -1083,9 +1083,10 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, 
size_t size)
int off = TCP_OFF(sk);
bool merge;
 
-   if (page)
-   pg_size <<= compound_order(page);
+   if (!page)
+   goto wait_for_memory;
 
+   pg_size <<= compound_order(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@@ -1492,7 +1493,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct 
msghdr *msg, size_t len,
break;
chtls_cleanup_rbuf(sk, copied);
sk_wait_data(sk, &timeo, NULL);
-   continue;
+   continue;
 found_ok_skb:
if (!skb->len) {
skb_dst_set(skb, NULL);
-- 
1.8.3.1



[PATCH v2 1/7] crypto: chtls: wait for memory sendmsg, sendpage

2018-05-01 Thread Atul Gupta
Reported-by: Gustavo A. R. Silva 
Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls.h  |  1 +
 drivers/crypto/chelsio/chtls/chtls_io.c   | 90 +--
 drivers/crypto/chelsio/chtls/chtls_main.c |  1 +
 3 files changed, 89 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls.h 
b/drivers/crypto/chelsio/chtls/chtls.h
index f4b8f1e..778c194 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -149,6 +149,7 @@ struct chtls_dev {
struct list_head rcu_node;
struct list_head na_node;
unsigned int send_page_order;
+   int max_host_sndbuf;
struct key_map kmap;
 };
 
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c 
b/drivers/crypto/chelsio/chtls/chtls_io.c
index 5a75be4..a4c7d2d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -914,6 +914,78 @@ static u16 tls_header_read(struct tls_hdr *thdr, struct 
iov_iter *from)
return (__force u16)cpu_to_be16(thdr->length);
 }
 
+static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
+{
+   return (cdev->max_host_sndbuf - sk->sk_wmem_queued) > 0;
+}
+
+static int csk_wait_memory(struct chtls_dev *cdev,
+  struct sock *sk, long *timeo_p)
+{
+   DEFINE_WAIT_FUNC(wait, woken_wake_function);
+   int sndbuf, err = 0;
+   long current_timeo;
+   long vm_wait = 0;
+   bool noblock;
+
+   current_timeo = *timeo_p;
+   noblock = (*timeo_p ? false : true);
+   sndbuf = cdev->max_host_sndbuf;
+   if (sndbuf > sk->sk_wmem_queued) {
+   current_timeo = (prandom_u32() % (HZ / 5)) + 2;
+   vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+   }
+
+   add_wait_queue(sk_sleep(sk), &wait);
+   while (1) {
+   sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+   if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+   goto do_error;
+   if (!*timeo_p) {
+   if (noblock)
+   set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+   goto do_nonblock;
+   }
+   if (signal_pending(current))
+   goto do_interrupted;
+   sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+   if (sndbuf > sk->sk_wmem_queued && !vm_wait)
+   break;
+
+   set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+   sk->sk_write_pending++;
+   sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (sndbuf > sk->sk_wmem_queued && !vm_wait), &wait);
+   sk->sk_write_pending--;
+
+   if (vm_wait) {
+   vm_wait -= current_timeo;
+   current_timeo = *timeo_p;
+   if (current_timeo != MAX_SCHEDULE_TIMEOUT) {
+   current_timeo -= vm_wait;
+   if (current_timeo < 0)
+   current_timeo = 0;
+   }
+   vm_wait = 0;
+   }
+   *timeo_p = current_timeo;
+   }
+out:
+   remove_wait_queue(sk_sleep(sk), &wait);
+   return err;
+do_error:
+   err = -EPIPE;
+   goto out;
+do_nonblock:
+   err = -EAGAIN;
+   goto out;
+do_interrupted:
+   err = sock_intr_errno(*timeo_p);
+   goto out;
+}
+
 int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 {
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
@@ -952,6 +1024,8 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, 
size_t size)
copy = mss - skb->len;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
+   if (!csk_mem_free(cdev, sk))
+   goto wait_for_sndbuf;
 
if (is_tls_tx(csk) && !csk->tlshws.txleft) {
struct tls_hdr hdr;
@@ -1099,8 +1173,10 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, 
size_t size)
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND)
push_frames_if_head(sk);
continue;
+wait_for_sndbuf:
+   set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
-   err = sk_stream_wait_memory(sk, &timeo);
+   err = csk_wait_memory(cdev, sk, &timeo);
if (err)
goto do_error;
}
@@ -1131,6 +1207,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
   int offset, size_t size, int flags)
 {
struct chtls_sock *csk;
+   struct chtls_dev *cdev;
int mss, err, copied;
struct tcp_sock *tp;
long timeo;
@@ -1138,6 +1215,7 @@ int chtls_sendpage(struct soc

[PATCH v2 3/7] crypto: chtls: dereference null variable

2018-05-01 Thread Atul Gupta
skb dereferenced before check in sendpage

Reported-by: Dan Carpenter 
Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls_io.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c 
b/drivers/crypto/chelsio/chtls/chtls_io.c
index a4c7d2d..85ddc07 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1230,9 +1230,8 @@ int chtls_sendpage(struct sock *sk, struct page *page,
struct sk_buff *skb = skb_peek_tail(&csk->txq);
int copy, i;
 
-   copy = mss - skb->len;
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
-   copy <= 0) {
+   (copy = mss - skb->len) <= 0) {
 new_buf:
if (!csk_mem_free(cdev, sk))
goto wait_for_sndbuf;
-- 
1.8.3.1



[PATCH v2 5/7] crypto: chtls: free beyond end rspq_skb_cache

2018-05-01 Thread Atul Gupta
Reported-by: Dan Carpenter 
Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls_main.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c 
b/drivers/crypto/chelsio/chtls/chtls_main.c
index e9ffc3d..1ef56d6 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -250,7 +250,7 @@ static void *chtls_uld_add(const struct cxgb4_lld_info 
*info)
 
return cdev;
 out_rspq_skb:
-   for (j = 0; j <= i; j++)
+   for (j = 0; j < i; j++)
kfree_skb(cdev->rspq_skb_cache[j]);
kfree_skb(cdev->askb);
 out_skb:
-- 
1.8.3.1



[PATCH v2 6/7] crypto: chtls: generic handling of data and hdr

2018-05-01 Thread Atul Gupta
Removed a redundant check and unified the receive handling of TLS PDUs
and headers, since both arrive from hardware in a common form.

Signed-off-by: Atul Gupta 
Signed-off-by: Harsh Jain 
---
 drivers/crypto/chelsio/chtls/chtls.h| 10 ++
 drivers/crypto/chelsio/chtls/chtls_cm.c | 12 +---
 drivers/crypto/chelsio/chtls/chtls_io.c | 54 -
 3 files changed, 23 insertions(+), 53 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls.h 
b/drivers/crypto/chelsio/chtls/chtls.h
index 778c194..a53a0e6 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -67,11 +67,6 @@ enum {
	CPL_RET_UNKNOWN_TID = 4    /* unexpected unknown TID */
 };
 
-#define TLS_RCV_ST_READ_HEADER 0xF0
-#define TLS_RCV_ST_READ_BODY   0xF1
-#define TLS_RCV_ST_READ_DONE   0xF2
-#define TLS_RCV_ST_READ_NB 0xF3
-
 #define LISTEN_INFO_HASH_SIZE 32
 #define RSPQ_HASH_BITS 5
 struct listen_info {
@@ -279,6 +274,7 @@ struct tlsrx_cmp_hdr {
 #define TLSRX_HDR_PKT_MAC_ERROR_F    TLSRX_HDR_PKT_MAC_ERROR_V(1U)
 
 #define TLSRX_HDR_PKT_ERROR_M   0x1F
+#define CONTENT_TYPE_ERROR 0x7F
 
 struct ulp_mem_rw {
__be32 cmd;
@@ -348,8 +344,8 @@ enum {
ULPCB_FLAG_HOLD  = 1 << 3,  /* skb not ready for Tx yet */
ULPCB_FLAG_COMPL = 1 << 4,  /* request WR completion */
ULPCB_FLAG_URG   = 1 << 5,  /* urgent data */
-   ULPCB_FLAG_TLS_ND= 1 << 6, /* payload of zero length */
-   ULPCB_FLAG_NO_HDR= 1 << 7, /* not a ofld wr */
+   ULPCB_FLAG_TLS_HDR   = 1 << 6,  /* payload with tls hdr */
+   ULPCB_FLAG_NO_HDR= 1 << 7,  /* not a ofld wr */
 };
 
 /* The ULP mode/submode of an skbuff */
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c 
b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 23c43b8..2bb6f03 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -1608,12 +1608,14 @@ static void chtls_set_hdrlen(struct sk_buff *skb, 
unsigned int nlen)
 
 static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
 {
-   struct cpl_rx_tls_cmp *cmp_cpl = cplhdr(skb);
+   struct tlsrx_cmp_hdr *tls_hdr_pkt;
+   struct cpl_rx_tls_cmp *cmp_cpl;
struct sk_buff *skb_rec;
struct chtls_sock *csk;
struct chtls_hws *tlsk;
struct tcp_sock *tp;
 
+   cmp_cpl = cplhdr(skb);
csk = rcu_dereference_sk_user_data(sk);
tlsk = &csk->tlshws;
tp = tcp_sk(sk);
@@ -1623,16 +1625,18 @@ static void chtls_rx_hdr(struct sock *sk, struct 
sk_buff *skb)
 
skb_reset_transport_header(skb);
__skb_pull(skb, sizeof(*cmp_cpl));
+   tls_hdr_pkt = (struct tlsrx_cmp_hdr *)skb->data;
+   if (tls_hdr_pkt->res_to_mac_error & TLSRX_HDR_PKT_ERROR_M)
+   tls_hdr_pkt->type = CONTENT_TYPE_ERROR;
if (!skb->data_len)
-   __skb_trim(skb, CPL_RX_TLS_CMP_LENGTH_G
-   (ntohl(cmp_cpl->pdulength_length)));
+   __skb_trim(skb, TLS_HEADER_LENGTH);
 
tp->rcv_nxt +=
CPL_RX_TLS_CMP_PDULENGTH_G(ntohl(cmp_cpl->pdulength_length));
 
+   ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_HDR;
skb_rec = __skb_dequeue(&tlsk->sk_recv_queue);
if (!skb_rec) {
-   ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_TLS_ND;
__skb_queue_tail(&sk->sk_receive_queue, skb);
} else {
chtls_set_hdrlen(skb, tlsk->pldlen);
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c 
b/drivers/crypto/chelsio/chtls/chtls_io.c
index 0d2e7e7..9dbdea0 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1533,31 +1533,13 @@ static int chtls_pt_recvmsg(struct sock *sk, struct 
msghdr *msg, size_t len,
}
}
}
-   if (hws->rstate == TLS_RCV_ST_READ_BODY) {
-   if (skb_copy_datagram_msg(skb, offset,
- msg, avail)) {
-   if (!copied) {
-   copied = -EFAULT;
-   break;
-   }
-   }
-   } else {
-   struct tlsrx_cmp_hdr *tls_hdr_pkt =
-   (struct tlsrx_cmp_hdr *)skb->data;
-
-   if ((tls_hdr_pkt->res_to_mac_error &
-   TLSRX_HDR_PKT_ERROR_M))
-   tls_hdr_pkt->type = 0x7F;
-
-   /* CMP pld len is for recv seq */
-   hws->rcvpld = skb->hdr_len;
-   if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
-   if (!copied) {
-   copied = -EFAULT;
-   break;
-  

[PATCH v2 7/7] crypto: chtls: HW supported socket opt

2018-05-01 Thread Atul Gupta
HW supported socket options are handled by HW while rest
are handled by SW

Signed-off-by: Atul Gupta 
---
 drivers/crypto/chelsio/chtls/chtls.h  |  10 ++
 drivers/crypto/chelsio/chtls/chtls_cm.h   |  12 ++
 drivers/crypto/chelsio/chtls/chtls_hw.c   |   2 +-
 drivers/crypto/chelsio/chtls/chtls_main.c | 191 +-
 4 files changed, 211 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/chelsio/chtls/chtls.h 
b/drivers/crypto/chelsio/chtls/chtls.h
index a53a0e6..3e46d28 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -353,6 +353,15 @@ enum {
 #define TCP_PAGE(sk)   (sk->sk_frag.page)
 #define TCP_OFF(sk)(sk->sk_frag.offset)
 
+struct tcp_cong_ops {
+   struct tcp_congestion_ops   ops;
+   int key;
+};
+
+#define CONG_OPS(__s, __k) \
+   { { .name = __s, .owner = THIS_MODULE }, .key = CONG_ALG_##__k, }
+#define CONG_ALG_NONE (-1)
+
 static inline struct chtls_dev *to_chtls_dev(struct tls_device *tlsdev)
 {
return container_of(tlsdev, struct chtls_dev, tlsdev);
@@ -472,6 +481,7 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
 void chtls_tcp_push(struct sock *sk, int flags);
 int chtls_push_frames(struct chtls_sock *csk, int comp);
 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val);
 int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode);
 void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
 unsigned int keyid_to_addr(int start_addr, int keyid);
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h 
b/drivers/crypto/chelsio/chtls/chtls_cm.h
index 78eb3af..569b723 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
@@ -36,9 +36,21 @@
 #define TF_TLS_ENABLE_S  0
 #define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)
 
+#define TF_NAGLE_S  7
+#define TF_NAGLE_V(x) ((x) << TF_NAGLE_S)
+
 #define TF_RX_QUIESCE_S15
 #define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
 
+#define TF_TURBO_S 21
+#define TF_TURBO_V(x) ((x) << TF_TURBO_S)
+
+#define TF_CCTRL_SEL0_S22
+#define TF_CCTRL_SEL0_V(x) ((x) << TF_CCTRL_SEL0_S)
+
+#define TCB_TOS_S  10
+#define TCB_TOS_V(x)   ((x) << TCB_TOS_S)
+
 /*
  * Max receive window supported by HW in bytes.  Only a small part of it can
  * be set through option0, the rest needs to be set through RX_DATA_ACK.
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c 
b/drivers/crypto/chelsio/chtls/chtls_hw.c
index 55d5014..1b7ee6b 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/crypto/chelsio/chtls/chtls_hw.c
@@ -61,7 +61,7 @@ static void __set_tcb_field(struct sock *sk, struct sk_buff 
*skb, u16 word,
  * Send control message to HW, message go as immediate data and packet
  * is freed immediately.
  */
-static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
+int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
 {
struct cpl_set_tcb_field *req;
unsigned int credits_needed;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c 
b/drivers/crypto/chelsio/chtls/chtls_main.c
index 1ef56d6..7d6965e 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -512,15 +512,200 @@ static int do_chtls_setsockopt(struct sock *sk, int 
optname,
return rc;
 }
 
-static int chtls_setsockopt(struct sock *sk, int level, int optname,
+void chtls_set_tos(struct sock *sk)
+{
+   u64 mask, val;
+
+   mask = 0x3FULL;
+   val = (inet_sk(sk)->tos >> 2) & 0x3F;
+   chtls_set_tcb_field(sk, 3, TCB_TOS_V(mask), TCB_TOS_V(val));
+}
+
+#define UNSUP_IP_SOCK_OPT ((1 << IP_OPTIONS))
+
+/*
+ *  Socket option code for IP.
+ */
+static int do_ip_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
 {
+   if (level != SOL_IP)
+   return -ENOPROTOOPT;
+
+   /* unsupported options */
+   if ((1 << optname) & UNSUP_IP_SOCK_OPT)
+   return -ENOPROTOOPT;
+
+   /* specially handled options */
+   if (optname == IP_TOS) {
+   struct inet_sock *inet = inet_sk(sk);
+   int val = 0, err = 0;
+
+   if (optlen >= sizeof(int)) {
+   if (get_user(val, (int __user *)optval))
+   return -EFAULT;
+   } else if (optlen >= sizeof(char)) {
+   unsigned char ucval;
+
+   if (get_user(ucval, (unsigned char __user *)optval))
+   return -EFAULT;
+   val = (int)ucval;
+   }
+   lock_sock(sk);
+   val &= ~3;
+   val |= inet->tos & 3;
+   if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP &&
+   !capable(CAP_NET_A

RE: [PATCH V8 1/5] crypto: Multi-buffer encryption infrastructure support

2018-05-01 Thread Dey, Megha


>-Original Message-
>From: Herbert Xu [mailto:herb...@gondor.apana.org.au]
>Sent: Thursday, April 26, 2018 2:45 AM
>To: Dey, Megha 
>Cc: linux-ker...@vger.kernel.org; linux-crypto@vger.kernel.org;
>da...@davemloft.net
>Subject: Re: [PATCH V8 1/5] crypto: Multi-buffer encryption infrastructure
>support
>
>On Wed, Apr 25, 2018 at 01:14:26AM +, Dey, Megha wrote:
>>
>> Is there any existing implementation of async crypto algorithm that uses the
>above approach? The ones I could find are either sync, have an outer and
>inner algorithm or use cryptd.
>>
>> I tried removing the mcryptd layer and the outer algorithm and some
>> plumbing to pass the correct structures, but see crashes.(obviously
>> some errors in the plumbing)
>
>OK, you can't just remove it because the inner algorithm requires
>kernel_fpu_begin/kernel_fpu_end.  So we do need two layers but I don't think
>we need cryptd or mcryptd.
>
>The existing simd wrapper should work just fine on the inner algorithm,
>provided that we add hash support to it.

Hi Herbert,

crypto/simd.c provides a simd_skcipher_create_compat. I have used the same 
template to introduce simd_ahash_create_compat
which would wrap around the inner hash algorithm.

Hence we would still register 2 algs, outer and inner.
>
>> I am not sure if we remove mcryptd, how would we queue work, flush
>partially completed jobs or call completions (currently done by mcryptd) if we
>simply call the inner algorithm.
>
>I don't think mcryptd is providing any real facility to the flushing apart 
>from a
>helper.  That same helper can live anywhere.

Currently we have outer_alg -> mcryptd alg -> inner_alg

Mcryptd is mainly providing the following:
1. Ensuring the lanes(8 in case of AVX2) are full before dispatching to the 
lower inner algorithm. This is obviously why we would expect better performance 
for multi-buffer as opposed to the present single-buffer algorithms.
2. If there no new incoming jobs, issue a flush.
3. A glue layer which sends the correct pointers and completions.

If we get rid of mcryptd, these functions needs to be done by someone. Since 
all multi-buffer algorithms would require this tasks, where do you suggest 
these helpers live, if not the current mcryptd.c?

I am not sure if you are suggesting that we need to get rid of the mcryptd work 
queue itself. In that case, we would need to execute in the context of the job 
requesting the crypto transformation.
>
>Cheers,
>--
>Email: Herbert Xu  Home Page:
>http://gondor.apana.org.au/~herbert/
>PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt