Re: [PATCH v13 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-27 Thread Stefano Brivio
On Tue, 27 Mar 2018 23:06:38 +0530
Atul Gupta  wrote:

> +static u8 tcp_state_to_flowc_state(u8 state)
> +{
> +   u8 ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
> +
> +   switch (state) {
> +   case TCP_ESTABLISHED:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
> +      break;
> +   case TCP_CLOSE_WAIT:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
> +      break;
> +   case TCP_FIN_WAIT1:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
> +      break;
> +   case TCP_CLOSING:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_CLOSING;
> +      break;
> +   case TCP_LAST_ACK:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_LASTACK;
> +      break;
> +   case TCP_FIN_WAIT2:
> +      ret = FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
> +      break;

Can't you just return those values right away instead?
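
Untested sketch, just to illustrate the direct-return version; the
default arm keeps your current fallback to ESTABLISHED:

static u8 tcp_state_to_flowc_state(u8 state)
{
   /* Map TCP states to the firmware FLOWC mnemonics directly. */
   switch (state) {
   case TCP_ESTABLISHED:
      return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
   case TCP_CLOSE_WAIT:
      return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
   case TCP_FIN_WAIT1:
      return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
   case TCP_CLOSING:
      return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
   case TCP_LAST_ACK:
      return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
   case TCP_FIN_WAIT2:
      return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
   default:
      return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
   }
}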

> [...]
>
> +static u64 tlstx_seq_number(struct chtls_hws *hws)
> +{
> +   return hws->tx_seq_no++;
> +}

The name of this function, as I also had commented earlier, is
misleading, because you are also incrementing the sequence number.
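
For instance (name purely illustrative, pick whatever you prefer),
something that makes the side effect visible at the call site:

/* Return the current Tx sequence number and advance it by one. */
static u64 tlstx_incr_seq_number(struct chtls_hws *hws)
{
   return hws->tx_seq_no++;
}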

> [...]
>
> +static void mark_urg(struct tcp_sock *tp, int flags,
> +                     struct sk_buff *skb)
> +{
> +   if (unlikely(flags & MSG_OOB)) {
> +      tp->snd_up = tp->write_seq;
> +      ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG |
> +                               ULPCB_FLAG_BARRIER |
> +                               ULPCB_FLAG_NO_APPEND |
> +                               ULPCB_FLAG_NEED_HDR;

Is this indentation the result of a previous 'if' clause which is now
gone?

> [...]
>
> +/*
> + * Returns true if a TCP socket is corked.
> + */
> +static int corked(const struct tcp_sock *tp, int flags)
> +{
> +   return (flags & MSG_MORE) | (tp->nonagle & TCP_NAGLE_CORK);

I guess you meant || here. Shouldn't this be a bool?

> +}
> +
> +/*
> + * Returns true if a send should try to push new data.
> + */
> +static int send_should_push(struct sock *sk, int flags)
> +{
> +   return should_push(sk) && !corked(tcp_sk(sk), flags);
> +}

If it returns true, I guess it should be a bool.
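
I.e. (untested sketch, assuming nothing relies on the int return
values):

/* True if the socket is corked, by either MSG_MORE or TCP_CORK. */
static bool corked(const struct tcp_sock *tp, int flags)
{
   return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK);
}

/* True if a send should try to push new data out now. */
static bool send_should_push(struct sock *sk, int flags)
{
   return should_push(sk) && !corked(tcp_sk(sk), flags);
}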

> [...]

-- 
Stefano


Re: [PATCH v13 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-27 Thread Stefano Brivio
On Tue, 27 Mar 2018 23:06:38 +0530
Atul Gupta  wrote:

> TLS handler for record transmit.
> Create Inline TLS work request and post to FW.
> Create Inline TLS record CPLs for hardware
> 
> Signed-off-by: Atul Gupta 
> Signed-off-by: Michael Werner 
> Reviewed-by: Stefano Brivio 

Absolutely not.

-- 
Stefano


[PATCH v13 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-27 Thread Atul Gupta
TLS handler for record transmit.
Create Inline TLS work request and post to FW.
Create Inline TLS record CPLs for hardware

Signed-off-by: Atul Gupta 
Signed-off-by: Michael Werner 
Reviewed-by: Stefano Brivio 
---
 drivers/crypto/chelsio/chtls/chtls_io.c | 1228 +++
 1 file changed, 1228 insertions(+)
 create mode 100644 drivers/crypto/chelsio/chtls/chtls_io.c

diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
new file mode 100644
index 0000000..b0e7ee1
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -0,0 +1,1228 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gu...@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <net/tcp.h>
+#include <net/busy_poll.h>
+#include <crypto/aes.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+   return csk->tlshws.rxkey >= 0;
+}
+
+static bool is_tls_tx(struct chtls_sock *csk)
+{
+   return csk->tlshws.txkey >= 0;
+}
+
+static int data_sgl_len(const struct sk_buff *skb)
+{
+   unsigned int cnt;
+
+   cnt = skb_shinfo(skb)->nr_frags;
+   return sgl_len(cnt) * 8;
+}
+
+static int nos_ivs(struct sock *sk, unsigned int size)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+   return DIV_ROUND_UP(size, csk->tlshws.mfs);
+}
+
+static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
+{
+   int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
+   int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
+
+   if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
+       MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+      ULP_SKB_CB(skb)->ulp.tls.iv = 1;
+      return 1;
+   }
+   ULP_SKB_CB(skb)->ulp.tls.iv = 0;
+   return 0;
+}
+
+static int max_ivs_size(struct sock *sk, int size)
+{
+   return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
+}
+
+static int ivs_size(struct sock *sk, const struct sk_buff *skb)
+{
+   return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
+                                  CIPHER_BLOCK_SIZE) : 0;
+}
+
+static int flowc_wr_credits(int nparams, int *flowclenp)
+{
+   int flowclen16, flowclen;
+
+   flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+   flowclen16 = DIV_ROUND_UP(flowclen, 16);
+   flowclen = flowclen16 * 16;
+
+   if (flowclenp)
+      *flowclenp = flowclen;
+
+   return flowclen16;
+}
+
+static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
+                                           struct fw_flowc_wr *flowc,
+                                           int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   struct sk_buff *skb;
+
+   skb = alloc_skb(flowclen, GFP_ATOMIC);
+   if (!skb)
+      return NULL;
+
+   memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+   skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+
+   return skb;
+}
+
+static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
+                         int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   struct tcp_sock *tp = tcp_sk(sk);
+   int flowclen16 = flowclen / 16;
+   struct sk_buff *skb;
+   int ret;
+
+   if (csk_flag(sk, CSK_TX_DATA_SENT)) {
+      skb = create_flowc_wr_skb(sk, flowc, flowclen);
+      if (!skb)
+         return -ENOMEM;
+
+      skb_entail(sk, skb,
+                 ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+      return 0;
+   }
+
+   ret = cxgb4_immdata_send(csk->egress_dev,
+                            csk->txq_idx,
+                            flowc, flowclen);
+   if (!ret)
+      return flowclen16;
+   skb = create_flowc_wr_skb(sk, flowc, flowclen);
+   if (!skb)
+      return -ENOMEM;
+   send_or_defer(sk, tp, skb, 0);
+   return flowclen16;
+}
+
+static u8 tcp_state_to_flowc_state(u8 state)
+{
+   u8 ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+
+   switch (state) {
+   case TCP_ESTABLISHED:
+      ret = FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+      break;
+   case TCP_CLOSE_WAIT:
+      ret = FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
+      break;
+   case TCP_FIN_WAIT1:
+      ret = FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
+      break;
+   case TCP_CLOSING:
+      ret = FW_FLOWC_MNEM_TCPSTATE_CLOSING;
+      break;
+   case TCP_LAST_ACK:
+      ret = FW_FLOWC_MNEM_TCPSTATE_LASTACK;
+      break;
+   case TCP_FIN_WAIT2:
+      ret = FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;