The IPsec operation shrinks the AAD so that it sits directly before the payload in the enqueue burst, and restores the original memory layout in the dequeue burst.
This commit adds support for OOP mode, following a similar strategy. Signed-off-by: Suanming Mou <suanmi...@nvidia.com> Acked-by: Matan Azrad <ma...@nvidia.com> --- doc/guides/cryptodevs/mlx5.rst | 3 ++ drivers/crypto/mlx5/mlx5_crypto.c | 2 +- drivers/crypto/mlx5/mlx5_crypto_gcm.c | 43 +++++++++++++++++++++------ 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/doc/guides/cryptodevs/mlx5.rst b/doc/guides/cryptodevs/mlx5.rst index 320f57bb02..a88d0e07b6 100644 --- a/doc/guides/cryptodevs/mlx5.rst +++ b/doc/guides/cryptodevs/mlx5.rst @@ -201,6 +201,9 @@ for an additional list of options shared with other mlx5 drivers. AAD (ESP SPI and SN) to the payload during enqueue OP. It then restores the original memory layout in the decrypt OP. ESP.IV size supported range is [0,16] bytes. + For OOP case, PMD will replace the bytes preceding the OP destination + address to match the information found between the AAD pointer and the + OP source address. User should prepare this headroom in this case. Set to ``full_capable`` by default. diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c index d49a375dcb..bf9cbd4a6a 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.c +++ b/drivers/crypto/mlx5/mlx5_crypto.c @@ -25,6 +25,7 @@ #define MLX5_CRYPTO_FEATURE_FLAGS(wrapped_mode) \ (RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \ + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | \ (wrapped_mode ? 
RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY : 0) | \ RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS) @@ -61,7 +62,6 @@ mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev, RTE_CRYPTODEV_FF_IN_PLACE_SGL | RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | - RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT; dev_info->capabilities = priv->caps; diff --git a/drivers/crypto/mlx5/mlx5_crypto_gcm.c b/drivers/crypto/mlx5/mlx5_crypto_gcm.c index 189e798d1d..f598273873 100644 --- a/drivers/crypto/mlx5/mlx5_crypto_gcm.c +++ b/drivers/crypto/mlx5/mlx5_crypto_gcm.c @@ -1000,6 +1000,7 @@ mlx5_crypto_gcm_ipsec_enqueue_burst(void *queue_pair, struct mlx5_crypto_gcm_data gcm_data; struct rte_crypto_op *op; struct rte_mbuf *m_src; + struct rte_mbuf *m_dst; uint16_t mask = qp->entries_n - 1; uint16_t remain = qp->entries_n - (qp->pi - qp->qp_ci); uint32_t idx; @@ -1029,19 +1030,32 @@ mlx5_crypto_gcm_ipsec_enqueue_burst(void *queue_pair, MLX5_ASSERT(pkt_iv_len <= MLX5_CRYPTO_GCM_IPSEC_IV_SIZE); gcm_data.src_bytes = op->sym->aead.data.length + sess->aad_len; gcm_data.src_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->sym->m_src); - /* OOP mode is not supported. */ - MLX5_ASSERT(!op->sym->m_dst || op->sym->m_dst == m_src); - gcm_data.dst_addr = gcm_data.src_addr; - gcm_data.dst_mkey = gcm_data.src_mkey; + m_dst = op->sym->m_dst; + if (m_dst && m_dst != m_src) { + MLX5_ASSERT(m_dst->nb_segs == 1 && + (rte_pktmbuf_headroom(m_dst) + op->sym->aead.data.offset) + >= sess->aad_len + pkt_iv_len); + gcm_data.dst_addr = RTE_PTR_SUB + (rte_pktmbuf_mtod_offset(m_dst, + void *, op->sym->aead.data.offset), sess->aad_len); + gcm_data.dst_mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, m_dst); + } else { + gcm_data.dst_addr = gcm_data.src_addr; + gcm_data.dst_mkey = gcm_data.src_mkey; + } gcm_data.dst_bytes = gcm_data.src_bytes; /* Digest should follow payload. 
*/ - MLX5_ASSERT(RTE_PTR_ADD - (gcm_data.src_addr, sess->aad_len + op->sym->aead.data.length) == - op->sym->aead.digest.data); - if (sess->op_type == MLX5_CRYPTO_OP_TYPE_ENCRYPTION) + if (sess->op_type == MLX5_CRYPTO_OP_TYPE_ENCRYPTION) { + MLX5_ASSERT(RTE_PTR_ADD(gcm_data.dst_addr, + sess->aad_len + op->sym->aead.data.length) == + op->sym->aead.digest.data); gcm_data.dst_bytes += sess->tag_len; - else + } else { + MLX5_ASSERT(RTE_PTR_ADD(gcm_data.src_addr, + sess->aad_len + op->sym->aead.data.length) == + op->sym->aead.digest.data); gcm_data.src_bytes += sess->tag_len; + } mlx5_crypto_gcm_wqe_set(qp, op, idx, &gcm_data); /* * All the data such as IV have been copied above, @@ -1080,6 +1094,7 @@ mlx5_crypto_gcm_restore_ipsec_mem(struct mlx5_crypto_qp *qp, struct mlx5_crypto_session *sess; struct rte_crypto_op *op; struct rte_mbuf *m_src; + struct rte_mbuf *m_dst; uint8_t *payload; while (orci != rci) { @@ -1095,6 +1110,16 @@ mlx5_crypto_gcm_restore_ipsec_mem(struct mlx5_crypto_qp *qp, RTE_PTR_SUB(payload, sess->aad_len), sess->aad_len); rte_memcpy(RTE_PTR_SUB(payload, MLX5_CRYPTO_GCM_IPSEC_IV_SIZE), &qp->ipsec_mem[idx], MLX5_CRYPTO_GCM_IPSEC_IV_SIZE); + m_dst = op->sym->m_dst; + if (m_dst && m_dst != m_src) { + uint32_t bytes_to_copy; + + bytes_to_copy = RTE_PTR_DIFF(payload, op->sym->aead.aad.data); + rte_memcpy(RTE_PTR_SUB(rte_pktmbuf_mtod_offset(m_dst, void *, + op->sym->aead.data.offset), bytes_to_copy), + op->sym->aead.aad.data, + bytes_to_copy); + } orci++; } } -- 2.34.1