Re: [PATCH v14 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-29 Thread Atul Gupta


On 3/29/2018 9:56 PM, Sabrina Dubroca wrote:
> 2018-03-29, 21:27:51 +0530, Atul Gupta wrote:
>> TLS handler for record transmit.
>> Create an Inline TLS work request and post it to the FW.
>> Create Inline TLS record CPLs for the hardware.
>>
>> Signed-off-by: Atul Gupta 
>> Signed-off-by: Michael Werner 
>> ---
> ...
>> +int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
>> +{
>> +        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
>> +        struct chtls_dev *cdev = csk->cdev;
>> +        struct tcp_sock *tp = tcp_sk(sk);
>> +        struct sk_buff *skb;
>> +        int mss, flags, err;
>> +        int recordsz = 0;
>> +        int copied = 0;
>> +        int hdrlen = 0;
>> +        long timeo;
>> +
>> +        lock_sock(sk);
>> +        flags = msg->msg_flags;
>> +        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
>> +
>> +        if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
>> +                err = sk_stream_wait_connect(sk, &timeo);
>> +                if (err)
>> +                        goto out_err;
>> +        }
>> +
>> +        if (sk->sk_prot->sendmsg != chtls_sendmsg) {
> Can that actually happen? If so, how? AFAICT, this function is only
> called when sk->sk_prot has been set to be chtls_cpl_prot.
This check is relevant for tunneling on the active-open path; it can be
removed for now.
>
>> +                release_sock(sk);
>> +                if (sk->sk_prot->sendmsg)
>> +                        return sk->sk_prot->sendmsg(sk, msg, size);
>> +                else
>> +                        return sk->sk_socket->ops->sendmsg(sk->sk_socket,
>> +                                                           msg, size);
>> +        }



Re: [PATCH v14 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-29 Thread Sabrina Dubroca
2018-03-29, 21:27:51 +0530, Atul Gupta wrote:
> TLS handler for record transmit.
> Create an Inline TLS work request and post it to the FW.
> Create Inline TLS record CPLs for the hardware.
> 
> Signed-off-by: Atul Gupta 
> Signed-off-by: Michael Werner 
> ---

...
> +int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
> +{
> +        struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
> +        struct chtls_dev *cdev = csk->cdev;
> +        struct tcp_sock *tp = tcp_sk(sk);
> +        struct sk_buff *skb;
> +        int mss, flags, err;
> +        int recordsz = 0;
> +        int copied = 0;
> +        int hdrlen = 0;
> +        long timeo;
> +
> +        lock_sock(sk);
> +        flags = msg->msg_flags;
> +        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
> +
> +        if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
> +                err = sk_stream_wait_connect(sk, &timeo);
> +                if (err)
> +                        goto out_err;
> +        }
> +
> +        if (sk->sk_prot->sendmsg != chtls_sendmsg) {

Can that actually happen? If so, how? AFAICT, this function is only
called when sk->sk_prot has been set to be chtls_cpl_prot.

> +                release_sock(sk);
> +                if (sk->sk_prot->sendmsg)
> +                        return sk->sk_prot->sendmsg(sk, msg, size);
> +                else
> +                        return sk->sk_socket->ops->sendmsg(sk->sk_socket,
> +                                                           msg, size);
> +        }

-- 
Sabrina


[PATCH v14 net-next 09/12] crypto: chtls - Inline TLS record Tx

2018-03-29 Thread Atul Gupta
TLS handler for record transmit.
Create an Inline TLS work request and post it to the FW.
Create Inline TLS record CPLs for the hardware.

Signed-off-by: Atul Gupta 
Signed-off-by: Michael Werner 
---
 drivers/crypto/chelsio/chtls/chtls_io.c | 1248 +++
 1 file changed, 1248 insertions(+)
 create mode 100644 drivers/crypto/chelsio/chtls/chtls_io.c

diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
new file mode 100644
index 0000000..4bc60cf
--- /dev/null
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -0,0 +1,1248 @@
+/*
+ * Copyright (c) 2018 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by: Atul Gupta (atul.gu...@chelsio.com)
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sched/signal.h>
+#include <net/tcp.h>
+#include <net/busy_poll.h>
+#include <crypto/aes.h>
+
+#include "chtls.h"
+#include "chtls_cm.h"
+
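+/*
+ * A connection has inline-TLS RX/TX state once the corresponding
+ * hardware key-context index has been assigned; a negative index
+ * means no key is programmed for that direction.
+ */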
+static bool is_tls_rx(struct chtls_sock *csk)
+{
+   return csk->tlshws.rxkey >= 0;
+}
+
+static bool is_tls_tx(struct chtls_sock *csk)
+{
+   return csk->tlshws.txkey >= 0;
+}
+
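+/*
+ * Bytes needed for a DSGL covering the skb's page fragments:
+ * sgl_len() returns the length in 8-byte flits, hence the multiply by 8.
+ */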
+static int data_sgl_len(const struct sk_buff *skb)
+{
+   unsigned int cnt;
+
+   cnt = skb_shinfo(skb)->nr_frags;
+   return sgl_len(cnt) * 8;
+}
+
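+/*
+ * Number of IVs needed: one per TLS record, with the payload split
+ * into records of at most mfs (TLS maximum fragment size) bytes.
+ */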
+static int nos_ivs(struct sock *sk, unsigned int size)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+
+   return DIV_ROUND_UP(size, csk->tlshws.mfs);
+}
+
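+/*
+ * Decide whether the IVs can travel as immediate data in the TX work
+ * request (alongside the CPLs, DSGL and on-mem key) and record the
+ * choice in the skb's ULP control block.
+ */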
+static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
+{
+   int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
+   int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);
+
+   if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
+       MAX_IMM_OFLD_TX_DATA_WR_LEN) {
+   ULP_SKB_CB(skb)->ulp.tls.iv = 1;
+   return 1;
+   }
+   ULP_SKB_CB(skb)->ulp.tls.iv = 0;
+   return 0;
+}
+
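+/* Worst-case IV bytes needed for @size payload bytes. */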
+static int max_ivs_size(struct sock *sk, int size)
+{
+   return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
+}
+
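+/* IV bytes to reserve in the WR: non-zero only when sent as immediate data. */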
+static int ivs_size(struct sock *sk, const struct sk_buff *skb)
+{
+   return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
+                                  CIPHER_BLOCK_SIZE) : 0;
+}
+
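+/*
+ * Credits consumed by a FLOWC work request carrying @nparams
+ * mnemonic/value pairs: the WR is padded to a multiple of 16 bytes and
+ * each 16-byte chunk costs one TX credit.
+ */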
+static int flowc_wr_credits(int nparams, int *flowclenp)
+{
+   int flowclen16, flowclen;
+
+   flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+   flowclen16 = DIV_ROUND_UP(flowclen, 16);
+   flowclen = flowclen16 * 16;
+
+   if (flowclenp)
+   *flowclenp = flowclen;
+
+   return flowclen16;
+}
+
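+/* Copy a prebuilt FLOWC WR into an skb mapped to the connection's TX queue. */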
+static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
+  struct fw_flowc_wr *flowc,
+  int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   struct sk_buff *skb;
+
+   skb = alloc_skb(flowclen, GFP_ATOMIC);
+   if (!skb)
+   return NULL;
+
+   memcpy(__skb_put(skb, flowclen), flowc, flowclen);
+   skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);
+
+   return skb;
+}
+
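+/*
+ * Issue a FLOWC WR for this connection.  Returns its size in 16-byte
+ * credits once sent, 0 if it had to be queued on the socket to stay
+ * ordered behind data that was already sent, or -ENOMEM.
+ */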
+static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
+int flowclen)
+{
+   struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+   struct tcp_sock *tp = tcp_sk(sk);
+   int flowclen16 = flowclen / 16;
+   struct sk_buff *skb;
+   int ret;
+
+   if (csk_flag(sk, CSK_TX_DATA_SENT)) {
+   skb = create_flowc_wr_skb(sk, flowc, flowclen);
+   if (!skb)
+   return -ENOMEM;
+
+   skb_entail(sk, skb,
+  ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
+   return 0;
+   }
+
+   ret = cxgb4_immdata_send(csk->egress_dev,
+csk->txq_idx,
+flowc, flowclen);
+   if (!ret)
+   return flowclen16;
+   skb = create_flowc_wr_skb(sk, flowc, flowclen);
+   if (!skb)
+   return -ENOMEM;
+   send_or_defer(sk, tp, skb, 0);
+   return flowclen16;
+}
+
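+/* Map the TCP state onto its FW FLOWC mnemonic; unknown states fall back to ESTABLISHED. */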
+static u8 tcp_state_to_flowc_state(u8 state)
+{
+   switch (state) {
+   case TCP_ESTABLISHED:
+   return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+   case TCP_CLOSE_WAIT:
+   return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
+   case TCP_FIN_WAIT1:
+   return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
+   case TCP_CLOSING:
+   return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
+   case TCP_LAST_ACK:
+   return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
+   case TCP_FIN_WAIT2:
+   return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
+   }
+
+   return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
+}
+
+int send_tx_flowc_wr(struct sock *sk, int compl,
+                     u32 snd_nxt, u32 rcv_nxt)
+{
+