Underlying libsso_snow3g library now supports bit-level
operations, so PMD has been updated to allow them.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com>
---
 doc/guides/cryptodevs/snow3g.rst       |   5 +-
 drivers/crypto/snow3g/rte_snow3g_pmd.c | 129 ++++++++++++++++++++++++++++++---
 2 files changed, 120 insertions(+), 14 deletions(-)

diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index a084cad..65628c6 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -51,8 +51,9 @@ Limitations
 -----------

 * Chained mbufs are not supported.
-* Snow3g(UEA2) supported only if cipher length, cipher offset fields are byte-aligned.
-* Snow3g(UIA2) supported only if hash length, hash offset fields are byte-aligned.
+* Snow3g(UIA2) supported only if hash offset field is byte-aligned.
+* In-place bit-level operations for Snow3g(UEA2) are not supported
+  (if length and/or offset of data to be ciphered is not byte-aligned).

 Installation
 ------------
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index f3e0e66..51f7051 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -204,14 +204,6 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
                        break;
                }

-               if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
-                               || ((ops[i]->sym->cipher.data.offset
-                                       % BYTE_LEN) != 0)) {
-                       ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       SNOW3G_LOG_ERR("Data Length or offset");
-                       break;
-               }
-
                src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
                                (ops[i]->sym->cipher.data.offset >> 3);
                dst[i] = ops[i]->sym->m_dst ?
@@ -231,6 +223,39 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
        return processed_ops;
 }

+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+               struct snow3g_session *session)
+{
+       uint8_t *src, *dst;
+       uint8_t *IV;
+       uint32_t length_in_bits, offset_in_bits;
+
+       /* Sanity checks. */
+       if (op->sym->cipher.iv.length != 16) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               SNOW3G_LOG_ERR("iv");
+               return 0;
+       }
+
+       offset_in_bits = op->sym->cipher.data.offset;
+       src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+       if (op->sym->m_dst == NULL) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+               return 0;
+       }
+       dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+       IV = op->sym->cipher.iv.data;
+       length_in_bits = op->sym->cipher.data.length;
+
+       sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+                       src, dst, length_in_bits, offset_in_bits);
+
+       return 1;
+}
+
 /** Generate/verify hash from mbufs with same hash key. */
 static int
 process_snow3g_hash_op(struct rte_crypto_op **ops,
@@ -255,11 +280,10 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
                        break;
                }

-               if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0)
-                               || ((ops[i]->sym->auth.data.offset
-                                       % BYTE_LEN) != 0)) {
+               /* Data must be byte aligned */
+               if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       SNOW3G_LOG_ERR("Data Length or offset");
+                       SNOW3G_LOG_ERR("Offset");
                        break;
                }

@@ -345,6 +369,52 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
        return processed_ops;
 }

+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+               struct snow3g_qp *qp)
+{
+       unsigned processed_op;
+
+       switch (session->op) {
+       case SNOW3G_OP_ONLY_CIPHER:
+               processed_op = process_snow3g_cipher_op_bit(op,
+                               session);
+               break;
+       case SNOW3G_OP_ONLY_AUTH:
+               processed_op = process_snow3g_hash_op(&op, session, 1);
+               break;
+       case SNOW3G_OP_CIPHER_AUTH:
+               processed_op = process_snow3g_cipher_op_bit(op, session);
+               if (processed_op == 1)
+                       process_snow3g_hash_op(&op, session, 1);
+               break;
+       case SNOW3G_OP_AUTH_CIPHER:
+               processed_op = process_snow3g_hash_op(&op, session, 1);
+               if (processed_op == 1)
+                       process_snow3g_cipher_op_bit(op, session);
+               break;
+       default:
+               /* Operation not supported. */
+               processed_op = 0;
+       }
+
+       /*
+        * If there was no error/authentication failure,
+        * change status to successful.
+        */
+       if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+       /* Free session if a session-less crypto op. */
+       if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+               rte_mempool_put(qp->sess_mp, op->sym->session);
+               op->sym->session = NULL;
+       }
+
+       return processed_op;
+}
+
 static uint16_t
 snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
@@ -374,6 +444,41 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                        return enqueued_ops;
                }

+               /* If length/offset is at bit-level, process this buffer alone. */
+               if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+                               || ((curr_c_op->sym->cipher.data.offset
+                                       % BYTE_LEN) != 0)) {
+                       /* Process the ops of the previous session. */
+                       if (prev_sess != NULL) {
+                               processed_ops = process_ops(c_ops,
+                                       prev_sess, qp, burst_size);
+                               n = rte_ring_enqueue_burst(qp->processed_ops,
+                                               (void **)c_ops,
+                                               processed_ops);
+                               qp->qp_stats.enqueued_count += n;
+                               enqueued_ops += n;
+                               if (n < burst_size) {
+                                       qp->qp_stats.enqueue_err_count +=
+                                               nb_ops - enqueued_ops;
+                                       return enqueued_ops;
+                               }
+                               burst_size = 0;
+
+                               prev_sess = NULL;
+                       }
+
+                       processed_ops = process_op_bit(curr_c_op, curr_sess, qp);
+                       n = rte_ring_enqueue_burst(qp->processed_ops,
+                                       (void **)&curr_c_op, processed_ops);
+                       qp->qp_stats.enqueued_count += n;
+                       enqueued_ops += n;
+                       if (n != 1) {
+                               qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+                               return enqueued_ops;
+                       }
+                       continue;
+               }
+
                /* Batch ops that share the same session. */
                if (prev_sess == NULL) {
                        prev_sess = curr_sess;
-- 
2.5.0

Reply via email to