[PATCH 4.16 02/43] crypto: chelsio - request to HW should wrap

2018-06-14 Thread Greg Kroah-Hartman
4.16-stable review patch.  If anyone has any objections, please let me know.

--

From: Atul Gupta 

commit 4c826fed675dfffd8485c5477b616d61d1ec9e9a upstream.

-Tx request and data are copied to the HW queue in 64B descriptors; check
for the end of the queue and wrap the current position back to the start
of the queue before writing the additional request info.
-Key context copy should check the key length only.
-A few reverse christmas tree corrections.

Signed-off-by: Atul Gupta 
Signed-off-by: Herbert Xu 
Signed-off-by: Greg Kroah-Hartman 

---
 drivers/crypto/chelsio/chcr_ipsec.c |   35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -346,18 +346,23 @@ inline void *copy_cpltx_pktxt(struct sk_
struct net_device *dev,
void *pos)
 {
+   struct cpl_tx_pkt_core *cpl;
+   struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
-   struct sge_eth_txq *q;
-   struct cpl_tx_pkt_core *cpl;
-   u64 cntrl = 0;
u32 ctrl0, qidx;
+   u64 cntrl = 0;
+   int left;
 
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
 
+   left = (void *)q->q.stat - pos;
+   if (!left)
+   pos = q->q.desc;
+
cpl = (struct cpl_tx_pkt_core *)pos;
 
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -383,18 +388,17 @@ inline void *copy_key_cpltx_pktxt(struct
void *pos,
struct ipsec_sa_entry *sa_entry)
 {
-   struct adapter *adap;
-   struct port_info *pi;
-   struct sge_eth_txq *q;
-   unsigned int len, qidx;
struct _key_ctx *key_ctx;
int left, eoq, key_len;
+   struct sge_eth_txq *q;
+   struct adapter *adap;
+   struct port_info *pi;
+   unsigned int qidx;
 
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
-   len = sa_entry->enckey_len + sizeof(struct cpl_tx_pkt_core);
key_len = sa_entry->kctx_len;
 
/* end of queue, reset pos to start of queue */
@@ -412,19 +416,14 @@ inline void *copy_key_cpltx_pktxt(struct
pos += sizeof(struct _key_ctx);
left -= sizeof(struct _key_ctx);
 
-   if (likely(len <= left)) {
+   if (likely(key_len <= left)) {
memcpy(key_ctx->key, sa_entry->key, key_len);
pos += key_len;
} else {
-   if (key_len <= left) {
-   memcpy(pos, sa_entry->key, key_len);
-   pos += key_len;
-   } else {
-   memcpy(pos, sa_entry->key, left);
-   memcpy(q->q.desc, sa_entry->key + left,
-  key_len - left);
-   pos = (u8 *)q->q.desc + (key_len - left);
-   }
+   memcpy(pos, sa_entry->key, left);
+   memcpy(q->q.desc, sa_entry->key + left,
+  key_len - left);
+   pos = (u8 *)q->q.desc + (key_len - left);
}
/* Copy CPL TX PKT XT */
pos = copy_cpltx_pktxt(skb, dev, pos);
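
For reference, the wrap handling added by the hunks above can be modelled
outside the driver. The sketch below is a minimal user-space illustration
only, under the assumption that the region between q->q.desc and q->q.stat
behaves like a plain circular buffer; struct ring and ring_copy_wrapped are
made-up names for this example, not driver or firmware API.

/*
 * Stand-alone model of the wrap-around copy done in
 * copy_cpltx_pktxt()/copy_key_cpltx_pktxt(): "desc" is the start of the
 * writable region, "stat" is treated here as one past its end, and a copy
 * that reaches stat must continue from desc again.
 */
#include <stdio.h>
#include <string.h>

struct ring {
	unsigned char buf[64];   /* backing storage, standing in for q->q.desc */
	unsigned char *desc;     /* start of the queue */
	unsigned char *stat;     /* treated as one past the writable region */
};

/* Copy len bytes at pos, wrapping to r->desc when the end is reached. */
static unsigned char *ring_copy_wrapped(struct ring *r, unsigned char *pos,
					const void *src, size_t len)
{
	size_t left = (size_t)(r->stat - pos);

	if (!left) {
		/* Already at the end of the queue: restart at the beginning. */
		pos = r->desc;
	} else if (len > left) {
		/* Payload straddles the end: split the copy, as the key path does. */
		memcpy(pos, src, left);
		memcpy(r->desc, (const unsigned char *)src + left, len - left);
		return r->desc + (len - left);
	}
	memcpy(pos, src, len);
	return pos + len;
}

int main(void)
{
	struct ring r;
	unsigned char key[40];
	unsigned char *pos;

	r.desc = r.buf;
	r.stat = r.buf + sizeof(r.buf);
	memset(key, 0xaa, sizeof(key));

	pos = r.desc + 48;                      /* pretend 48 bytes are in use */
	pos = ring_copy_wrapped(&r, pos, key, sizeof(key));
	printf("offset after wrap: %td\n", pos - r.desc);   /* prints 24 */
	return 0;
}

Compiled and run as a normal C program, it prints 24, mirroring the
pos = (u8 *)q->q.desc + (key_len - left) computation in the key-copy hunk.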