Add support to the cryptodev scheduler PMD for the existing security
protocols in the security library, namely IPsec, MACsec, PDCP and
DOCSIS. This includes adding the following:
- synchronization of the workers' security capabilities
- retrieval of the scheduler's synchronized security capabilities
- retrieval of the security session size, i.e. the maximum session size
  across all workers
- creation of security sessions on each worker
- deletion of security sessions on each worker
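
As an illustration (not part of the patch), below is a minimal sketch of
how an application might exercise the new support once workers are
attached: it queries the scheduler's synchronized security capabilities
and creates one security session, which the scheduler fans out to a
session per worker. The device id, session mempool and session conf are
assumed placeholders, and the helper name is hypothetical.

#include <rte_cryptodev.h>
#include <rte_security.h>

/* Hypothetical helper; dev_id, sess_mp and conf are assumptions */
static void *
sched_sec_session_create(uint8_t dev_id, struct rte_mempool *sess_mp,
		struct rte_security_session_conf *conf)
{
	/* security context exposed by the scheduler PMD */
	void *sec_ctx = rte_cryptodev_get_sec_ctx(dev_id);
	const struct rte_security_capability *caps;

	if (sec_ctx == NULL)
		return NULL;

	/* common security capability set of all attached workers;
	 * with no workers attached the scheduler reports an empty
	 * (zeroed) capability array
	 */
	caps = rte_security_capabilities_get(sec_ctx);
	if (caps == NULL || caps[0].action == RTE_SECURITY_ACTION_TYPE_NONE)
		return NULL;

	/* one call; the scheduler creates a session on each worker */
	return rte_security_session_create(sec_ctx, conf, sess_mp);
}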

Signed-off-by: David Coyle <david.co...@intel.com>
Signed-off-by: Kevin O'Sullivan <kevin.osulli...@intel.com>
---
 doc/guides/rel_notes/release_23_11.rst        |   3 +
 drivers/crypto/scheduler/meson.build          |   2 +-
 .../scheduler/rte_cryptodev_scheduler.c       | 228 ++++++++++-
 drivers/crypto/scheduler/scheduler_failover.c |  12 +-
 .../crypto/scheduler/scheduler_multicore.c    |  10 +-
 .../scheduler/scheduler_pkt_size_distr.c      |  54 +--
 drivers/crypto/scheduler/scheduler_pmd.c      |  32 ++
 drivers/crypto/scheduler/scheduler_pmd_ops.c  | 374 +++++++++++++-----
 .../crypto/scheduler/scheduler_pmd_private.h  | 148 ++++---
 .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
 10 files changed, 640 insertions(+), 229 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 4411bb32c1..6f2a11552f 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -72,6 +72,9 @@ New Features
      Also, make sure to start the actual text at the margin.
      =======================================================
 
+* **Updated Cryptodev Scheduler PMD.**
+
+  Added support for security protocols through the ``rte_security`` API callbacks.
 
 Removed Items
 -------------
diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build
index cd18efc791..752d655415 100644
--- a/drivers/crypto/scheduler/meson.build
+++ b/drivers/crypto/scheduler/meson.build
@@ -7,7 +7,7 @@ if is_windows
     subdir_done()
 endif
 
-deps += ['bus_vdev', 'reorder']
+deps += ['bus_vdev', 'reorder', 'security']
 sources = files(
         'rte_cryptodev_scheduler.c',
         'scheduler_failover.c',
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 258d6f8c43..21fab828c1 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -10,6 +10,8 @@
 #include "rte_cryptodev_scheduler.h"
 #include "scheduler_pmd_private.h"
 
+#define MAX_CAPS 256
+
 /** update the scheduler pmd's capability with attaching device's
  *  capability.
  *  For each device to be attached, the scheduler's capability should be
@@ -59,7 +61,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
                                        cap->sym.auth.digest_size.max ?
                                        s_cap->sym.auth.digest_size.max :
                                        cap->sym.auth.digest_size.max;
-
                        }
 
                        if (s_cap->sym.xform_type ==
@@ -81,25 +82,184 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
 
                memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
                sync_nb_caps--;
+               i--;
        }
 
        return sync_nb_caps;
 }
 
+#define CMP_SEC_CAP_PROTO(proto) \
+       memcmp(&sec_cap1->proto, &sec_cap2->proto, sizeof(sec_cap1->proto))
+
 static int
-update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
+               struct rte_security_capability *sec_cap2)
+{
+       if (sec_cap1->action != sec_cap2->action ||
+                       sec_cap1->protocol != sec_cap2->protocol ||
+                       sec_cap1->ol_flags != sec_cap2->ol_flags)
+               return 0;
+
+       if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
+               return !CMP_SEC_CAP_PROTO(ipsec);
+       else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
+               return !CMP_SEC_CAP_PROTO(macsec);
+       else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_PDCP)
+               return !CMP_SEC_CAP_PROTO(pdcp);
+       else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+               return !CMP_SEC_CAP_PROTO(docsis);
+       else
+               return 0;
+}
+
+#define SET_SEC_CAP_PROTO(proto) (dst_sec_cap->proto = src_sec_cap->proto)
+
+static void
+copy_sec_cap(struct rte_security_capability *dst_sec_cap,
+               struct rte_security_capability *src_sec_cap)
+{
+       dst_sec_cap->action = src_sec_cap->action;
+       dst_sec_cap->protocol = src_sec_cap->protocol;
+       if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
+               SET_SEC_CAP_PROTO(ipsec);
+       else if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
+               SET_SEC_CAP_PROTO(macsec);
+       else if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_PDCP)
+               SET_SEC_CAP_PROTO(pdcp);
+       else if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+               SET_SEC_CAP_PROTO(docsis);
+       dst_sec_cap->ol_flags = src_sec_cap->ol_flags;
+}
+
+static uint32_t
+sync_sec_crypto_caps(struct rte_cryptodev_capabilities *tmp_sec_crypto_caps,
+               const struct rte_cryptodev_capabilities *sec_crypto_caps,
+               const struct rte_cryptodev_capabilities *worker_sec_crypto_caps)
+{
+       uint8_t nb_caps = 0;
+
+       nb_caps = sync_caps(tmp_sec_crypto_caps, nb_caps, sec_crypto_caps);
+       sync_caps(tmp_sec_crypto_caps, nb_caps, worker_sec_crypto_caps);
+
+       return nb_caps;
+}
+
+/** update the scheduler pmd's security capability with attaching device's
+ *  security capability.
+ *  For each device to be attached, the scheduler's security capability should
+ *  be the common capability set of all workers
+ **/
+static uint32_t
+sync_sec_caps(struct rte_security_capability *sec_caps,
+               struct rte_cryptodev_capabilities sec_crypto_caps[][MAX_CAPS],
+               uint32_t nb_sec_caps,
+               const struct rte_security_capability *worker_sec_caps)
 {
-       struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
-       uint32_t nb_caps = 0, i;
+       uint32_t nb_worker_sec_caps = 0, i;
+
+       if (worker_sec_caps == NULL)
+               return 0;
+
+       while (worker_sec_caps[nb_worker_sec_caps].action !=
+                                       RTE_SECURITY_ACTION_TYPE_NONE)
+               nb_worker_sec_caps++;
+
+       /* Handle first worker */
+       if (nb_sec_caps == 0) {
+               uint32_t nb_worker_sec_crypto_caps = 0;
 
-       if (sched_ctx->capabilities) {
-               rte_free(sched_ctx->capabilities);
-               sched_ctx->capabilities = NULL;
+               rte_memcpy(sec_caps, worker_sec_caps,
+                               sizeof(*sec_caps) * nb_worker_sec_caps);
+
+               for (i = 0; i < nb_worker_sec_caps; i++) {
+                       while (worker_sec_caps[i].crypto_capabilities[
+                                       nb_worker_sec_crypto_caps].op !=
+                                               RTE_CRYPTO_OP_TYPE_UNDEFINED)
+                               nb_worker_sec_crypto_caps++;
+
+                       rte_memcpy(&sec_crypto_caps[i][0],
+                               &worker_sec_caps[i].crypto_capabilities[0],
+                               sizeof(sec_crypto_caps[i][0]) *
+                                       nb_worker_sec_crypto_caps);
+               }
+               return nb_worker_sec_caps;
        }
 
-       for (i = 0; i < sched_ctx->nb_workers; i++) {
-               struct rte_cryptodev_info dev_info;
+       for (i = 0; i < nb_sec_caps; i++) {
+               struct rte_security_capability *sec_cap = &sec_caps[i];
+               uint32_t j;
+
+               for (j = 0; j < nb_worker_sec_caps; j++) {
+                       struct rte_cryptodev_capabilities
+                                       tmp_sec_crypto_caps[MAX_CAPS] = { {0} };
+                       uint32_t nb_sec_crypto_caps = 0;
+                       const struct rte_security_capability *worker_sec_cap =
+                                                               &worker_sec_caps[j];
+
+                       if (!check_sec_cap_equal(worker_sec_cap, sec_cap))
+                               continue;
 
+                       /* Sync the crypto caps of the common security cap */
+                       nb_sec_crypto_caps = sync_sec_crypto_caps(
+                                               tmp_sec_crypto_caps,
+                                               &sec_crypto_caps[i][0],
+                                               &worker_sec_cap->crypto_capabilities[0]);
+
+                       memset(&sec_crypto_caps[i][0], 0,
+                                       sizeof(*&sec_crypto_caps[i][0]) *
+                                               MAX_CAPS);
+
+                       rte_memcpy(&sec_crypto_caps[i][0],
+                                       &tmp_sec_crypto_caps[0],
+                                       sizeof(*&sec_crypto_caps[i][0]) *
+                                               nb_sec_crypto_caps);
+
+                       /* Common cap found, stop searching */
+                       break;
+               }
+
+               if (j < nb_worker_sec_caps)
+                       continue;
+
+               /*
+                * Remove an uncommon security cap, and its associated crypto
+                * caps, from the arrays
+                */
+               for (j = i; j < nb_sec_caps - 1; j++) {
+                       rte_memcpy(&sec_caps[j], &sec_caps[j+1],
+                                       sizeof(*sec_cap));
+
+                       rte_memcpy(&sec_crypto_caps[j][0],
+                                       &sec_crypto_caps[j+1][0],
+                                       sizeof(*&sec_crypto_caps[j][0]) *
+                                               MAX_CAPS);
+               }
+               memset(&sec_caps[nb_sec_caps - 1], 0, sizeof(*sec_cap));
+               memset(&sec_crypto_caps[nb_sec_caps - 1][0], 0,
+                       sizeof(*&sec_crypto_caps[nb_sec_caps - 1][0]) *
+                               MAX_CAPS);
+               nb_sec_caps--;
+               i--;
+       }
+
+       return nb_sec_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+       struct rte_cryptodev_capabilities tmp_caps[MAX_CAPS] = { {0} };
+       struct rte_security_capability tmp_sec_caps[MAX_CAPS] = { {0} };
+       struct rte_cryptodev_capabilities
+               tmp_sec_crypto_caps[MAX_CAPS][MAX_CAPS] = { {{0}} };
+       uint32_t nb_caps = 0, nb_sec_caps = 0, i;
+       struct rte_cryptodev_info dev_info;
+
+       /* Free any previously allocated capability memory */
+       scheduler_free_capabilities(sched_ctx);
+
+       /* Determine the new cryptodev capabilities for the scheduler */
+       for (i = 0; i < sched_ctx->nb_workers; i++) {
                rte_cryptodev_info_get(sched_ctx->workers[i].dev_id, &dev_info);
 
                nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
@@ -116,6 +276,54 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
        rte_memcpy(sched_ctx->capabilities, tmp_caps,
                        sizeof(struct rte_cryptodev_capabilities) * nb_caps);
 
+       /* Determine the new security capabilities for the scheduler */
+       for (i = 0; i < sched_ctx->nb_workers; i++) {
+               struct rte_cryptodev *dev =
+                               &rte_cryptodevs[sched_ctx->workers[i].dev_id];
+               struct rte_security_ctx *sec_ctx = dev->security_ctx;
+
+               nb_sec_caps = sync_sec_caps(tmp_sec_caps, tmp_sec_crypto_caps,
+                       nb_sec_caps, rte_security_capabilities_get(sec_ctx));
+       }
+
+       sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
+                                       sizeof(struct rte_security_capability) *
+                                       (nb_sec_caps + 1), 0, SOCKET_ID_ANY);
+       if (!sched_ctx->sec_capabilities)
+               return -ENOMEM;
+
+       sched_ctx->sec_crypto_capabilities = rte_zmalloc_socket(NULL,
+                               sizeof(struct rte_cryptodev_capabilities *) *
+                               (nb_sec_caps + 1),
+                               0, SOCKET_ID_ANY);
+       if (!sched_ctx->sec_crypto_capabilities)
+               return -ENOMEM;
+
+       for (i = 0; i < nb_sec_caps; i++) {
+               uint16_t nb_sec_crypto_caps = 0;
+
+               copy_sec_cap(&sched_ctx->sec_capabilities[i], &tmp_sec_caps[i]);
+
+               while (tmp_sec_crypto_caps[i][nb_sec_crypto_caps].op !=
+                                               RTE_CRYPTO_OP_TYPE_UNDEFINED)
+                       nb_sec_crypto_caps++;
+
+               sched_ctx->sec_crypto_capabilities[i] =
+                       rte_zmalloc_socket(NULL,
+                               sizeof(struct rte_cryptodev_capabilities) *
+                               (nb_sec_crypto_caps + 1), 0, SOCKET_ID_ANY);
+               if (!sched_ctx->sec_crypto_capabilities[i])
+                       return -ENOMEM;
+
+               rte_memcpy(sched_ctx->sec_crypto_capabilities[i],
+                               &tmp_sec_crypto_caps[i][0],
+                               sizeof(struct rte_cryptodev_capabilities)
+                                       * nb_sec_crypto_caps);
+
+               sched_ctx->sec_capabilities[i].crypto_capabilities =
+                               sched_ctx->sec_crypto_capabilities[i];
+       }
+
        return 0;
 }
 
@@ -205,6 +413,7 @@ rte_cryptodev_scheduler_worker_attach(uint8_t scheduler_id, uint8_t worker_id)
        sched_ctx->nb_workers++;
 
        if (update_scheduler_capability(sched_ctx) < 0) {
+               scheduler_free_capabilities(sched_ctx);
                worker->dev_id = 0;
                worker->driver_id = 0;
                sched_ctx->nb_workers--;
@@ -266,6 +475,7 @@ rte_cryptodev_scheduler_worker_detach(uint8_t scheduler_id, uint8_t worker_id)
        sched_ctx->nb_workers--;
 
        if (update_scheduler_capability(sched_ctx) < 0) {
+               scheduler_free_capabilities(sched_ctx);
                CR_SCHED_LOG(ERR, "capabilities update failed");
                return -ENOTSUP;
        }
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index f24d2fc44b..52ff2ffbb7 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -28,7 +28,7 @@ failover_worker_enqueue(struct scheduler_worker *worker,
 {
        uint16_t processed_ops;
 
-       scheduler_set_worker_session(ops, nb_ops, index);
+       scheduler_set_worker_sessions(ops, nb_ops, index);
 
        processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);
@@ -51,7 +51,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        ops, nb_ops, PRIMARY_WORKER_IDX);
 
        if (enqueued_ops < nb_ops) {
-               scheduler_retrieve_session(&ops[enqueued_ops],
+               scheduler_retrieve_sessions(&ops[enqueued_ops],
                                                nb_ops - enqueued_ops);
                enqueued_ops += failover_worker_enqueue(
                                &qp_ctx->secondary_worker,
@@ -59,7 +59,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                                nb_ops - enqueued_ops,
                                SECONDARY_WORKER_IDX);
                if (enqueued_ops < nb_ops)
-                       scheduler_retrieve_session(&ops[enqueued_ops],
+                       scheduler_retrieve_sessions(&ops[enqueued_ops],
                                                nb_ops - enqueued_ops);
        }
 
@@ -102,7 +102,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
        qp_ctx->deq_idx = (~qp_ctx->deq_idx) & WORKER_SWITCH_MASK;
 
        if (nb_deq_ops == nb_ops)
-               goto retrieve_session;
+               goto retrieve_sessions;
 
        worker = workers[qp_ctx->deq_idx];
 
@@ -112,8 +112,8 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                worker->nb_inflight_cops -= nb_deq_ops2;
        }
 
-retrieve_session:
-       scheduler_retrieve_session(ops, nb_deq_ops + nb_deq_ops2);
+retrieve_sessions:
+       scheduler_retrieve_sessions(ops, nb_deq_ops + nb_deq_ops2);
 
        return nb_deq_ops + nb_deq_ops2;
 }
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 3dea850661..a21b522f9f 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -183,7 +183,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
 
        while (!mc_ctx->stop_signal) {
                if (pending_enq_ops) {
-                       scheduler_set_worker_session(
+                       scheduler_set_worker_sessions(
                                &enq_ops[pending_enq_ops_idx], pending_enq_ops,
                                worker_idx);
                        processed_ops =
@@ -192,7 +192,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
                                        &enq_ops[pending_enq_ops_idx],
                                        pending_enq_ops);
                        if (processed_ops < pending_deq_ops)
-                               scheduler_retrieve_session(
+                               scheduler_retrieve_sessions(
                                        &enq_ops[pending_enq_ops_idx +
                                                processed_ops],
                                        pending_deq_ops - processed_ops);
@@ -203,13 +203,13 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
                        processed_ops = rte_ring_dequeue_burst(enq_ring, (void *)enq_ops,
                                                        MC_SCHED_BUFFER_SIZE, NULL);
                        if (processed_ops) {
-                               scheduler_set_worker_session(enq_ops,
+                               scheduler_set_worker_sessions(enq_ops,
                                        processed_ops, worker_idx);
                                pending_enq_ops_idx = rte_cryptodev_enqueue_burst(
                                                worker->dev_id, worker->qp_id,
                                                enq_ops, processed_ops);
                                if (pending_enq_ops_idx < processed_ops)
-                                       scheduler_retrieve_session(
+                                       scheduler_retrieve_sessions(
                                                enq_ops + pending_enq_ops_idx,
                                                processed_ops -
                                                pending_enq_ops_idx);
@@ -229,7 +229,7 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
                                        worker->dev_id, worker->qp_id, deq_ops,
                                        MC_SCHED_BUFFER_SIZE);
                        if (processed_ops) {
-                               scheduler_retrieve_session(deq_ops,
+                               scheduler_retrieve_sessions(deq_ops,
                                        processed_ops);
                                inflight_ops -= processed_ops;
                                if (reordering_enabled) {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 0c51fff930..30bb5ce0e2 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -59,7 +59,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
        }
 
        for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
-               struct scheduler_session_ctx *sess_ctx[4];
                uint8_t target[4];
                uint32_t job_len[4];
 
@@ -76,17 +75,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                rte_prefetch0((uint8_t *)ops[i + 7]->sym->session +
                        sizeof(struct rte_cryptodev_sym_session));
 
-               sess_ctx[0] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);
-               sess_ctx[1] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 1]->sym->session);
-               sess_ctx[2] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 2]->sym->session);
-               sess_ctx[3] = CRYPTODEV_GET_SYM_SESS_PRIV(ops[i + 3]->sym->session);
-
-               /* job_len is initialized as cipher data length, once
-                * it is 0, equals to auth data length
-                */
-               job_len[0] = ops[i]->sym->cipher.data.length;
-               job_len[0] += (ops[i]->sym->cipher.data.length == 0) *
-                               ops[i]->sym->auth.data.length;
+               job_len[0] = scheduler_get_job_len(ops[i]);
                /* decide the target op based on the job length */
                target[0] = !(job_len[0] & psd_qp_ctx->threshold);
                p_enq_op = &enq_ops[target[0]];
@@ -100,15 +89,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        break;
                }
 
-               if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       ops[i]->sym->session =
-                               sess_ctx[0]->worker_sess[target[0]];
+               scheduler_set_single_worker_session(ops[i], target[0]);
                sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
                p_enq_op->pos++;
 
-               job_len[1] = ops[i + 1]->sym->cipher.data.length;
-               job_len[1] += (ops[i + 1]->sym->cipher.data.length == 0) *
-                               ops[i+1]->sym->auth.data.length;
+               job_len[1] = scheduler_get_job_len(ops[i + 1]);
                target[1] = !(job_len[1] & psd_qp_ctx->threshold);
                p_enq_op = &enq_ops[target[1]];
 
@@ -118,15 +103,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        break;
                }
 
-               if (ops[i + 1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       ops[i + 1]->sym->session =
-                               sess_ctx[1]->worker_sess[target[1]];
+               scheduler_set_single_worker_session(ops[i + 1], target[1]);
                sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+1];
                p_enq_op->pos++;
 
-               job_len[2] = ops[i + 2]->sym->cipher.data.length;
-               job_len[2] += (ops[i + 2]->sym->cipher.data.length == 0) *
-                               ops[i + 2]->sym->auth.data.length;
+               job_len[2] = scheduler_get_job_len(ops[i + 2]);
                target[2] = !(job_len[2] & psd_qp_ctx->threshold);
                p_enq_op = &enq_ops[target[2]];
 
@@ -136,15 +117,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        break;
                }
 
-               if (ops[i + 2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       ops[i + 2]->sym->session =
-                               sess_ctx[2]->worker_sess[target[2]];
+               scheduler_set_single_worker_session(ops[i + 2], target[2]);
                sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+2];
                p_enq_op->pos++;
 
-               job_len[3] = ops[i + 3]->sym->cipher.data.length;
-               job_len[3] += (ops[i + 3]->sym->cipher.data.length == 0) *
-                               ops[i + 3]->sym->auth.data.length;
+               job_len[3] = scheduler_get_job_len(ops[i + 3]);
                target[3] = !(job_len[3] & psd_qp_ctx->threshold);
                p_enq_op = &enq_ops[target[3]];
 
@@ -154,22 +131,16 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        break;
                }
 
-               if (ops[i + 3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       ops[i + 3]->sym->session =
-                               sess_ctx[3]->worker_sess[target[3]];
+               scheduler_set_single_worker_session(ops[i + 3], target[3]);
                sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i+3];
                p_enq_op->pos++;
        }
 
        for (; i < nb_ops; i++) {
-               struct scheduler_session_ctx *sess_ctx =
-                       CRYPTODEV_GET_SYM_SESS_PRIV(ops[i]->sym->session);
                uint32_t job_len;
                uint8_t target;
 
-               job_len = ops[i]->sym->cipher.data.length;
-               job_len += (ops[i]->sym->cipher.data.length == 0) *
-                               ops[i]->sym->auth.data.length;
+               job_len = scheduler_get_job_len(ops[i]);
                target = !(job_len & psd_qp_ctx->threshold);
                p_enq_op = &enq_ops[target];
 
@@ -179,8 +150,7 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                        break;
                }
 
-               if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       ops[i]->sym->session = sess_ctx->worker_sess[target];
+               scheduler_set_single_worker_session(ops[i], target);
                sched_ops[p_enq_op->worker_idx][p_enq_op->pos] = ops[i];
                p_enq_op->pos++;
        }
@@ -236,7 +206,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
        if (worker->nb_inflight_cops) {
                nb_deq_ops_pri = rte_cryptodev_dequeue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);
-               scheduler_retrieve_session(ops, nb_deq_ops_pri);
+               scheduler_retrieve_sessions(ops, nb_deq_ops_pri);
                worker->nb_inflight_cops -= nb_deq_ops_pri;
        }
 
@@ -251,7 +221,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
                nb_deq_ops_sec = rte_cryptodev_dequeue_burst(worker->dev_id,
                                worker->qp_id, &ops[nb_deq_ops_pri],
                                nb_ops - nb_deq_ops_pri);
-               scheduler_retrieve_session(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
+               scheduler_retrieve_sessions(&ops[nb_deq_ops_pri], nb_deq_ops_sec);
                worker->nb_inflight_cops -= nb_deq_ops_sec;
 
                if (!worker->nb_inflight_cops)
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 4e8bbf0e09..f641afaf89 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -233,6 +233,35 @@ cryptodev_scheduler_create(const char *name,
                return -ENOMEM;
        }
 
+       struct rte_security_ctx *security_instance;
+       security_instance = rte_zmalloc_socket(NULL,
+                                       sizeof(struct rte_security_ctx),
+                                       RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+       if (security_instance == NULL) {
+               CR_SCHED_LOG(ERR, "rte_security_ctx memory alloc failed");
+               return -ENOMEM;
+       }
+
+       security_instance->device = (void *)dev;
+       security_instance->ops = rte_crypto_scheduler_pmd_sec_ops;
+       security_instance->sess_cnt = 0;
+       dev->security_ctx = security_instance;
+
+       /*
+        * Initialize security capabilities structure as an empty structure,
+        * in case device information is requested when no workers are attached
+        */
+       sched_ctx->sec_capabilities = rte_zmalloc_socket(NULL,
+                                       sizeof(struct rte_security_capability),
+                                       0, SOCKET_ID_ANY);
+
+       if (!sched_ctx->sec_capabilities) {
+               rte_free(security_instance);
+               CR_SCHED_LOG(ERR, "Not enough memory for security capability "
+                               "information");
+               return -ENOMEM;
+       }
+
        rte_cryptodev_pmd_probing_finish(dev);
 
        return 0;
@@ -263,6 +292,9 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
                                        sched_ctx->workers[i].dev_id);
        }
 
+       rte_free(dev->security_ctx);
+       dev->security_ctx = NULL;
+
        return rte_cryptodev_pmd_destroy(dev);
 }
 
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 294aab4452..cc35196e7a 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -13,6 +13,206 @@
 
 #include "scheduler_pmd_private.h"
 
+struct scheduler_configured_sess_info {
+       uint8_t dev_id;
+       uint8_t driver_id;
+       union {
+               struct rte_cryptodev_sym_session *sess;
+               struct {
+                       struct rte_security_session *sec_sess;
+                       struct rte_security_ctx *sec_ctx;
+               };
+       };
+};
+
+static int
+scheduler_session_create(void *sess, void *sess_params,
+               struct scheduler_ctx *sched_ctx,
+               enum rte_crypto_op_sess_type session_type)
+{
+       struct rte_mempool *mp = rte_mempool_from_obj(sess);
+       struct scheduler_session_ctx *sess_ctx;
+       struct scheduler_configured_sess_info configured_sess[
+                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
+       uint32_t i, j, n_configured_sess = 0;
+       int ret = 0;
+
+       if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+               sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
+                               (struct rte_cryptodev_sym_session *)sess);
+       else
+               sess_ctx = SECURITY_GET_SESS_PRIV(
+                               (struct rte_security_session *)sess);
+
+       if (mp == NULL)
+               return -EINVAL;
+
+       for (i = 0; i < sched_ctx->nb_workers; i++) {
+               struct scheduler_worker *worker = &sched_ctx->workers[i];
+               struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
+               uint8_t next_worker = 0;
+
+               for (j = 0; j < n_configured_sess; j++) {
+                       if (configured_sess[j].driver_id == worker->driver_id) {
+                               if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+                                       sess_ctx->worker_sess[i] =
+                                               configured_sess[j].sess;
+                               else
+                                       sess_ctx->worker_sec_sess[i] =
+                                               configured_sess[j].sec_sess;
+
+                               next_worker = 1;
+                               break;
+                       }
+               }
+               if (next_worker)
+                       continue;
+
+               if (rte_mempool_avail_count(mp) == 0) {
+                       ret = -ENOMEM;
+                       goto error_exit;
+               }
+
+               if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+                       struct rte_cryptodev_sym_session *worker_sess =
+                               rte_cryptodev_sym_session_create(worker->dev_id,
+                                               (struct rte_crypto_sym_xform *)
+                                               sess_params, mp);
+
+                       if (worker_sess == NULL) {
+                               ret = -rte_errno;
+                               goto error_exit;
+                       }
+
+                       worker_sess->opaque_data = (uint64_t)sess;
+                       sess_ctx->worker_sess[i] = worker_sess;
+                       configured_sess[n_configured_sess].sess = worker_sess;
+               } else {
+                       struct rte_security_session *worker_sess =
+                               rte_security_session_create(dev->security_ctx,
+                                       (struct rte_security_session_conf *)
+                                       sess_params, mp);
+
+                       if (worker_sess == NULL) {
+                               ret = -rte_errno;
+                               goto error_exit;
+                       }
+
+                       worker_sess->opaque_data = (uint64_t)sess;
+                       sess_ctx->worker_sec_sess[i] = worker_sess;
+                       configured_sess[n_configured_sess].sec_sess =
+                                                       worker_sess;
+                       configured_sess[n_configured_sess].sec_ctx =
+                                                       dev->security_ctx;
+               }
+
+               configured_sess[n_configured_sess].driver_id =
+                                                       worker->driver_id;
+               configured_sess[n_configured_sess].dev_id = worker->dev_id;
+               n_configured_sess++;
+       }
+
+       return 0;
+
+error_exit:
+       sess_ctx->ref_cnt = sched_ctx->ref_cnt;
+       for (i = 0; i < n_configured_sess; i++) {
+               if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+                       rte_cryptodev_sym_session_free(
+                                               configured_sess[i].dev_id,
+                                               configured_sess[i].sess);
+               else
+                       rte_security_session_destroy(
+                                               configured_sess[i].sec_ctx,
+                                               configured_sess[i].sec_sess);
+       }
+
+       return ret;
+}
+
+static void
+scheduler_session_destroy(void *sess, struct scheduler_ctx *sched_ctx,
+               uint8_t session_type)
+{
+       struct scheduler_session_ctx *sess_ctx;
+       struct scheduler_configured_sess_info deleted_sess[
+                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
+       uint32_t i, j, n_deleted_sess = 0;
+
+       if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+               sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(
+                               (struct rte_cryptodev_sym_session *)sess);
+       else
+               sess_ctx = SECURITY_GET_SESS_PRIV(
+                               (struct rte_security_session *)sess);
+
+       if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
+               CR_SCHED_LOG(WARNING,
+                       "Worker updated between session creation/deletion. "
+                       "The session may not be freed fully.");
+       }
+
+       for (i = 0; i < sched_ctx->nb_workers; i++) {
+               struct scheduler_worker *worker = &sched_ctx->workers[i];
+               struct rte_cryptodev *dev = &rte_cryptodevs[worker->dev_id];
+               uint8_t next_worker = 0;
+
+               for (j = 0; j < n_deleted_sess; j++) {
+                       if (deleted_sess[j].driver_id == worker->driver_id) {
+                               if (session_type == RTE_CRYPTO_OP_WITH_SESSION)
+                                       sess_ctx->worker_sess[i] = NULL;
+                               else
+                                       sess_ctx->worker_sec_sess[i] = NULL;
+
+                               next_worker = 1;
+                               break;
+                       }
+               }
+               if (next_worker)
+                       continue;
+
+               if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+                       rte_cryptodev_sym_session_free(worker->dev_id,
+                                               sess_ctx->worker_sess[i]);
+                       sess_ctx->worker_sess[i] = NULL;
+               } else {
+                       rte_security_session_destroy(dev->security_ctx,
+                                               sess_ctx->worker_sec_sess[i]);
+                       sess_ctx->worker_sec_sess[i] = NULL;
+               }
+
+               deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
+       }
+}
+
+static unsigned int
+scheduler_session_size_get(struct scheduler_ctx *sched_ctx,
+               uint8_t session_type)
+{
+       uint8_t i = 0;
+       uint32_t max_priv_sess_size = 0;
+
+       /* Check what is the maximum private session size for all workers */
+       for (i = 0; i < sched_ctx->nb_workers; i++) {
+               uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
+               struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
+               struct rte_security_ctx *sec_ctx = dev->security_ctx;
+               uint32_t priv_sess_size = 0;
+
+               if (session_type == RTE_CRYPTO_OP_WITH_SESSION) {
+                       priv_sess_size =
+                               (*dev->dev_ops->sym_session_get_size)(dev);
+               } else {
+                       priv_sess_size = (*sec_ctx->ops->session_get_size)(dev);
+               }
+
+               if (max_priv_sess_size < priv_sess_size)
+                       max_priv_sess_size = priv_sess_size;
+       }
+
+       return max_priv_sess_size;
+}
+
 /** attaching the workers predefined by scheduler's EAL options */
 static int
 scheduler_attach_init_worker(struct rte_cryptodev *dev)
@@ -265,10 +465,7 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
                sched_ctx->private_ctx = NULL;
        }
 
-       if (sched_ctx->capabilities) {
-               rte_free(sched_ctx->capabilities);
-               sched_ctx->capabilities = NULL;
-       }
+       scheduler_free_capabilities(sched_ctx);
 
        return 0;
 }
@@ -451,92 +648,22 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 }
 
 static uint32_t
-scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev)
 {
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-       uint8_t i = 0;
-       uint32_t max_priv_sess_size = 0;
-
-       /* Check what is the maximum private session size for all workers */
-       for (i = 0; i < sched_ctx->nb_workers; i++) {
-               uint8_t worker_dev_id = sched_ctx->workers[i].dev_id;
-               struct rte_cryptodev *dev = &rte_cryptodevs[worker_dev_id];
-               uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
 
-               if (max_priv_sess_size < priv_sess_size)
-                       max_priv_sess_size = priv_sess_size;
-       }
-
-       return max_priv_sess_size;
+       return scheduler_session_size_get(sched_ctx, RTE_CRYPTO_OP_WITH_SESSION);
 }
 
-struct scheduler_configured_sess_info {
-       uint8_t dev_id;
-       uint8_t driver_id;
-       struct rte_cryptodev_sym_session *sess;
-};
-
 static int
 scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
        struct rte_crypto_sym_xform *xform,
        struct rte_cryptodev_sym_session *sess)
 {
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-       struct rte_mempool *mp = rte_mempool_from_obj(sess);
-       struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-       struct scheduler_configured_sess_info configured_sess[
-                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
-       uint32_t i, j, n_configured_sess = 0;
-       int ret = 0;
-
-       if (mp == NULL)
-               return -EINVAL;
 
-       for (i = 0; i < sched_ctx->nb_workers; i++) {
-               struct scheduler_worker *worker = &sched_ctx->workers[i];
-               struct rte_cryptodev_sym_session *worker_sess;
-               uint8_t next_worker = 0;
-
-               for (j = 0; j < n_configured_sess; j++) {
-                       if (configured_sess[j].driver_id ==
-                                       worker->driver_id) {
-                               sess_ctx->worker_sess[i] =
-                                       configured_sess[j].sess;
-                               next_worker = 1;
-                               break;
-                       }
-               }
-               if (next_worker)
-                       continue;
-
-               if (rte_mempool_avail_count(mp) == 0) {
-                       ret = -ENOMEM;
-                       goto error_exit;
-               }
-
-               worker_sess = rte_cryptodev_sym_session_create(worker->dev_id,
-                       xform, mp);
-               if (worker_sess == NULL) {
-                       ret = -rte_errno;
-                       goto error_exit;
-               }
-
-               worker_sess->opaque_data = (uint64_t)sess;
-               sess_ctx->worker_sess[i] = worker_sess;
-               configured_sess[n_configured_sess].driver_id =
-                       worker->driver_id;
-               configured_sess[n_configured_sess].dev_id = worker->dev_id;
-               configured_sess[n_configured_sess].sess = worker_sess;
-               n_configured_sess++;
-       }
-
-       return 0;
-error_exit:
-       sess_ctx->ref_cnt = sched_ctx->ref_cnt;
-       for (i = 0; i < n_configured_sess; i++)
-               rte_cryptodev_sym_session_free(configured_sess[i].dev_id,
-                       configured_sess[i].sess);
-       return ret;
+       return scheduler_session_create((void *)sess, (void *)xform, sched_ctx,
+                               RTE_CRYPTO_OP_WITH_SESSION);
 }
 
 /** Clear the memory of session so it doesn't leave key material behind */
@@ -545,37 +672,9 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
 {
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-       struct scheduler_session_ctx *sess_ctx = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
-       struct scheduler_configured_sess_info deleted_sess[
-                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS] = {{0}};
-       uint32_t i, j, n_deleted_sess = 0;
-
-       if (sched_ctx->ref_cnt != sess_ctx->ref_cnt) {
-               CR_SCHED_LOG(WARNING,
-                       "Worker updated between session creation/deletion. "
-                       "The session may not be freed fully.");
-       }
-
-       for (i = 0; i < sched_ctx->nb_workers; i++) {
-               struct scheduler_worker *worker = &sched_ctx->workers[i];
-               uint8_t next_worker = 0;
-
-               for (j = 0; j < n_deleted_sess; j++) {
-                       if (deleted_sess[j].driver_id == worker->driver_id) {
-                               sess_ctx->worker_sess[i] = NULL;
-                               next_worker = 1;
-                               break;
-                       }
-               }
-               if (next_worker)
-                       continue;
-
-               rte_cryptodev_sym_session_free(worker->dev_id,
-                       sess_ctx->worker_sess[i]);
 
-               deleted_sess[n_deleted_sess++].driver_id = worker->driver_id;
-               sess_ctx->worker_sess[i] = NULL;
-       }
+       scheduler_session_destroy((void *)sess, sched_ctx,
+                               RTE_CRYPTO_OP_WITH_SESSION);
 }
 
 static struct rte_cryptodev_ops scheduler_pmd_ops = {
@@ -598,3 +697,62 @@ static struct rte_cryptodev_ops scheduler_pmd_ops = {
 };
 
 struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
+
+/** Configure a scheduler session from a security session configuration */
+static int
+scheduler_pmd_sec_sess_create(void *dev, struct rte_security_session_conf *conf,
+                       struct rte_security_session *sess)
+{
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+       return scheduler_session_create((void *)sess, (void *)conf, sched_ctx,
+                               RTE_CRYPTO_OP_SECURITY_SESSION);
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+scheduler_pmd_sec_sess_destroy(void *dev,
+                              struct rte_security_session *sess)
+{
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+       scheduler_session_destroy((void *)sess, sched_ctx,
+                               RTE_CRYPTO_OP_SECURITY_SESSION);
+
+       return 0;
+}
+
+/** Get sync security capabilities for scheduler pmds */
+static const struct rte_security_capability *
+scheduler_pmd_sec_capa_get(void *dev)
+{
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+       return sched_ctx->sec_capabilities;
+}
+
+static unsigned int
+scheduler_pmd_sec_sess_size_get(void *dev)
+{
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       struct scheduler_ctx *sched_ctx = cdev->data->dev_private;
+
+       return scheduler_session_size_get(sched_ctx,
+                               RTE_CRYPTO_OP_SECURITY_SESSION);
+}
+
+static struct rte_security_ops scheduler_pmd_sec_ops = {
+               .session_create = scheduler_pmd_sec_sess_create,
+               .session_update = NULL,
+               .session_get_size = scheduler_pmd_sec_sess_size_get,
+               .session_stats_get = NULL,
+               .session_destroy = scheduler_pmd_sec_sess_destroy,
+               .set_pkt_metadata = NULL,
+               .capabilities_get = scheduler_pmd_sec_capa_get
+};
+
+struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops =
+                                                       &scheduler_pmd_sec_ops;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index 36d0bb6307..fadab80425 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -6,6 +6,8 @@
 #define _SCHEDULER_PMD_PRIVATE_H
 
 #include "rte_cryptodev_scheduler.h"
+#include <rte_security.h>
+#include <rte_security_driver.h>
 
 #define CRYPTODEV_NAME_SCHEDULER_PMD   crypto_scheduler
 /**< Scheduler Crypto PMD device name */
@@ -30,7 +32,8 @@ struct scheduler_ctx {
        /**< private scheduler context pointer */
 
        struct rte_cryptodev_capabilities *capabilities;
-       uint32_t nb_capabilities;
+       struct rte_security_capability *sec_capabilities;
+       struct rte_cryptodev_capabilities **sec_crypto_capabilities;
 
        uint32_t max_nb_queue_pairs;
 
@@ -64,8 +67,12 @@ struct scheduler_qp_ctx {
 
 struct scheduler_session_ctx {
        uint32_t ref_cnt;
-       struct rte_cryptodev_sym_session *worker_sess[
-               RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+       union {
+               struct rte_cryptodev_sym_session *worker_sess[
+                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+               struct rte_security_session *worker_sec_sess[
+                       RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
+       };
 };
 
 extern uint8_t cryptodev_scheduler_driver_id;
@@ -108,7 +115,22 @@ scheduler_order_drain(struct rte_ring *order_ring,
 }
 
 static __rte_always_inline void
-scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,
+scheduler_set_single_worker_session(struct rte_crypto_op *op,
+               uint8_t worker_idx)
+{
+       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+               struct scheduler_session_ctx *sess_ctx =
+                               CRYPTODEV_GET_SYM_SESS_PRIV(op->sym->session);
+               op->sym->session = sess_ctx->worker_sess[worker_idx];
+       } else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+               struct scheduler_session_ctx *sess_ctx =
+                               SECURITY_GET_SESS_PRIV(op->sym->session);
+               op->sym->session = sess_ctx->worker_sec_sess[worker_idx];
+       }
+}
+
+static __rte_always_inline void
+scheduler_set_worker_sessions(struct rte_crypto_op **ops, uint16_t nb_ops,
                uint8_t worker_index)
 {
        struct rte_crypto_op **op = ops;
@@ -129,52 +151,34 @@ scheduler_set_worker_session(struct rte_crypto_op **ops, uint16_t nb_ops,
                        rte_prefetch0(op[7]->sym->session);
                }
 
-               if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-                       struct scheduler_session_ctx *sess_ctx =
-                               CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);
-                       op[0]->sym->session =
-                               sess_ctx->worker_sess[worker_index];
-               }
-
-               if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-                       struct scheduler_session_ctx *sess_ctx =
-                               CRYPTODEV_GET_SYM_SESS_PRIV(op[1]->sym->session);
-                       op[1]->sym->session =
-                               sess_ctx->worker_sess[worker_index];
-               }
-
-               if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-                       struct scheduler_session_ctx *sess_ctx =
-                               CRYPTODEV_GET_SYM_SESS_PRIV(op[2]->sym->session);
-                       op[2]->sym->session =
-                               sess_ctx->worker_sess[worker_index];
-               }
-
-               if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-                       struct scheduler_session_ctx *sess_ctx =
-                               CRYPTODEV_GET_SYM_SESS_PRIV(op[3]->sym->session);
-                       op[3]->sym->session =
-                               sess_ctx->worker_sess[worker_index];
-               }
+               scheduler_set_single_worker_session(op[0], worker_index);
+               scheduler_set_single_worker_session(op[1], worker_index);
+               scheduler_set_single_worker_session(op[2], worker_index);
+               scheduler_set_single_worker_session(op[3], worker_index);
 
                op += 4;
                n -= 4;
        }
 
        while (n--) {
-               if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
-                       struct scheduler_session_ctx *sess_ctx =
-                               CRYPTODEV_GET_SYM_SESS_PRIV(op[0]->sym->session);
-
-                       op[0]->sym->session =
-                               sess_ctx->worker_sess[worker_index];
-                       op++;
-               }
+               scheduler_set_single_worker_session(op[0], worker_index);
+               op++;
        }
 }
 
 static __rte_always_inline void
-scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)
+scheduler_retrieve_single_session(struct rte_crypto_op *op)
+{
+       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+               op->sym->session = (void *)(uintptr_t)
+                       rte_cryptodev_sym_session_opaque_data_get(op->sym->session);
+       else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+               op->sym->session = (void *)(uintptr_t)
+                       rte_security_session_opaque_data_get(op->sym->session);
+}
+
+static __rte_always_inline void
+scheduler_retrieve_sessions(struct rte_crypto_op **ops, uint16_t nb_ops)
 {
        uint16_t n = nb_ops;
        struct rte_crypto_op **op = ops;
@@ -194,32 +198,66 @@ scheduler_retrieve_session(struct rte_crypto_op **ops, uint16_t nb_ops)
                        rte_prefetch0(op[7]->sym->session);
                }
 
-               if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       op[0]->sym->session = (void *)(uintptr_t)
-                               rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);
-               if (op[1]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       op[1]->sym->session = (void *)(uintptr_t)
-                               rte_cryptodev_sym_session_opaque_data_get(op[1]->sym->session);
-               if (op[2]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       op[2]->sym->session = (void *)(uintptr_t)
-                               rte_cryptodev_sym_session_opaque_data_get(op[2]->sym->session);
-               if (op[3]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       op[3]->sym->session = (void *)(uintptr_t)
-                               rte_cryptodev_sym_session_opaque_data_get(op[3]->sym->session);
+               scheduler_retrieve_single_session(op[0]);
+               scheduler_retrieve_single_session(op[1]);
+               scheduler_retrieve_single_session(op[2]);
+               scheduler_retrieve_single_session(op[3]);
 
                op += 4;
                n -= 4;
        }
 
        while (n--) {
-               if (op[0]->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
-                       op[0]->sym->session = (void *)(uintptr_t)
-                               rte_cryptodev_sym_session_opaque_data_get(op[0]->sym->session);
+               scheduler_retrieve_single_session(op[0]);
                op++;
        }
 }
 
+static __rte_always_inline uint32_t
+scheduler_get_job_len(struct rte_crypto_op *op)
+{
+       uint32_t job_len;
+
+       /* job_len is initialized as cipher data length; if
+        * it is 0, then it is set to auth data length
+        */
+       job_len = op->sym->cipher.data.length;
+       job_len += (op->sym->cipher.data.length == 0) *
+                                       op->sym->auth.data.length;
+
+       return job_len;
+}
+
+static __rte_always_inline void
+scheduler_free_capabilities(struct scheduler_ctx *sched_ctx)
+{
+       uint32_t i;
+
+       if (sched_ctx->capabilities) {
+               rte_free(sched_ctx->capabilities);
+               sched_ctx->capabilities = NULL;
+       }
+
+       if (sched_ctx->sec_crypto_capabilities) {
+               i = 0;
+               while (sched_ctx->sec_crypto_capabilities[i] != NULL) {
+                       rte_free(sched_ctx->sec_crypto_capabilities[i]);
+                       sched_ctx->sec_crypto_capabilities[i] = NULL;
+                       i++;
+               }
+
+               rte_free(sched_ctx->sec_crypto_capabilities);
+               sched_ctx->sec_crypto_capabilities = NULL;
+       }
+
+       if (sched_ctx->sec_capabilities) {
+               rte_free(sched_ctx->sec_capabilities);
+               sched_ctx->sec_capabilities = NULL;
+       }
+}
+
 /** device specific operations function pointer structure */
 extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+extern struct rte_security_ops *rte_crypto_scheduler_pmd_sec_ops;
 
 #endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index ad3f8b842a..08041887a8 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -28,11 +28,11 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
        if (unlikely(nb_ops == 0))
                return 0;
 
-       scheduler_set_worker_session(ops, nb_ops, worker_idx);
+       scheduler_set_worker_sessions(ops, nb_ops, worker_idx);
        processed_ops = rte_cryptodev_enqueue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);
        if (processed_ops < nb_ops)
-               scheduler_retrieve_session(ops + processed_ops,
+               scheduler_retrieve_sessions(ops + processed_ops,
                        nb_ops - processed_ops);
 
        worker->nb_inflight_cops += processed_ops;
@@ -87,7 +87,7 @@ schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
        nb_deq_ops = rte_cryptodev_dequeue_burst(worker->dev_id,
                        worker->qp_id, ops, nb_ops);
-       scheduler_retrieve_session(ops, nb_deq_ops);
+       scheduler_retrieve_sessions(ops, nb_deq_ops);
        last_worker_idx += 1;
        last_worker_idx %= rr_qp_ctx->nb_workers;
 
-- 
2.25.1

