> Currently only one qp will be used for one core. The number of qps can
> be increased to match the number of lcore params.
>
> Signed-off-by: Anoob Joseph <ano...@marvell.com>
> Signed-off-by: Lukasz Bartosik <lbarto...@marvell.com>
> ---
>  examples/ipsec-secgw/ipsec-secgw.c | 32 ++++++++++++++++++++++----------
>  examples/ipsec-secgw/ipsec.c       | 13 +++++++------
>  examples/ipsec-secgw/ipsec.h       |  9 ++++++++-
>  3 files changed, 37 insertions(+), 17 deletions(-)
>
> diff --git a/examples/ipsec-secgw/ipsec-secgw.c
> b/examples/ipsec-secgw/ipsec-secgw.c
> index 3b5aaf6..b49dfef 100644
> --- a/examples/ipsec-secgw/ipsec-secgw.c
> +++ b/examples/ipsec-secgw/ipsec-secgw.c
> @@ -72,7 +72,7 @@
>
>  #define MAX_LCORE_PARAMS 1024
>
> -#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << portid))
> +#define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << port))
>
>  /*
>   * Configurable number of RX/TX ring descriptors
> @@ -968,20 +968,25 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf
> *pkts[], uint8_t nb_pkts)
>  }
>
>  static inline void
> -process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
> -		uint8_t nb_pkts, uint16_t portid)
> +process_pkts(struct lcore_conf *qconf, struct lcore_rx_queue *rx_queue,
> +		struct rte_mbuf **pkts, uint8_t nb_pkts)
>  {
>  	struct ipsec_traffic traffic;
>
>  	prepare_traffic(pkts, &traffic, nb_pkts);
>
> +	qconf->inbound.port_id = rx_queue->port_id;
> +	qconf->inbound.queue_id = rx_queue->queue_id;
> +	qconf->outbound.port_id = rx_queue->port_id;
> +	qconf->outbound.queue_id = rx_queue->queue_id;
> +
>  	if (unlikely(single_sa)) {
> -		if (UNPROTECTED_PORT(portid))
> +		if (UNPROTECTED_PORT(rx_queue->port_id))
>  			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
>  		else
>  			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
>  	} else {
> -		if (UNPROTECTED_PORT(portid))
> +		if (UNPROTECTED_PORT(rx_queue->port_id))
>  			process_pkts_inbound(&qconf->inbound, &traffic);
>  		else
>  			process_pkts_outbound(&qconf->outbound, &traffic);
> @@ -1169,7 +1174,7 @@ main_loop(__attribute__((unused)) void *dummy)
>  					pkts, MAX_PKT_BURST);
>
>  			if (nb_rx > 0)
> -				process_pkts(qconf, pkts, nb_rx, portid);
> +				process_pkts(qconf, &rxql[i], pkts, nb_rx);
>
>  			/* dequeue and process completed crypto-ops */
>  			if (UNPROTECTED_PORT(portid))
> @@ -1709,6 +1714,8 @@ add_mapping(struct rte_hash *map, const char *str,
> uint16_t cdev_id,
>  	unsigned long i;
>  	struct cdev_key key = { 0 };
>
> +	key.port_id = params->port_id;
> +	key.queue_id = params->queue_id;
>  	key.lcore_id = params->lcore_id;
>  	if (cipher)
>  		key.cipher_algo = cipher->sym.cipher.algo;
> @@ -1722,7 +1729,9 @@ add_mapping(struct rte_hash *map, const char *str,
> uint16_t cdev_id,
>  		return 0;
>
>  	for (i = 0; i < ipsec_ctx->nb_qps; i++)
> -		if (ipsec_ctx->tbl[i].id == cdev_id)
> +		if (ipsec_ctx->tbl[i].id == cdev_id &&
> +				ipsec_ctx->tbl[i].port_id == key.port_id &&
> +				ipsec_ctx->tbl[i].port_queue_id == key.queue_id)
I didn't test the patch, but at first glance the approach seems reasonable to me, with one main objection: I don't think this new mapping method should become the default and only possible one. We probably need an extra option to let the user select it, while keeping the current mapping method as the default. That option could then be translated into some global mask that is applied to cdev_key before add/lookup, or something along those lines (a rough sketch of what I mean is at the end of this mail).

>  			break;
>
>  	if (i == ipsec_ctx->nb_qps) {
> @@ -1733,9 +1742,12 @@ add_mapping(struct rte_hash *map, const char *str,
> uint16_t cdev_id,
>  	}
>  	ipsec_ctx->tbl[i].id = cdev_id;
>  	ipsec_ctx->tbl[i].qp = qp;
> +	ipsec_ctx->tbl[i].port_id = key.port_id;
> +	ipsec_ctx->tbl[i].port_queue_id = key.queue_id;
>  	ipsec_ctx->nb_qps++;
> -	printf("%s cdev mapping: lcore %u using cdev %u qp %u "
> -			"(cdev_id_qp %lu)\n", str, key.lcore_id,
> +	printf("%s cdev mapping: lcore %u, portid %u, queueid %u "
> +			"using cdev %u qp %u (cdev_id_qp %lu)\n",
> +			str, key.lcore_id, key.port_id, key.queue_id,
>  			cdev_id, qp, i);
>  	}
>
> @@ -1849,7 +1861,7 @@ cryptodevs_init(void)
>  		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
>  			rte_errno);
>
> -	printf("lcore/cryptodev/qp mappings:\n");
> +	printf("lcore/portid/queueid/cryptodev/qp mappings:\n");
>
>  	idx = 0;
>  	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
> diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
> index d4b5712..68059d8 100644
> --- a/examples/ipsec-secgw/ipsec.c
> +++ b/examples/ipsec-secgw/ipsec.c
> @@ -63,6 +63,8 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx,
> struct ipsec_sa *sa,
>  	struct cdev_key key = { 0 };
>
>  	key.lcore_id = (uint8_t)rte_lcore_id();
> +	key.port_id = ipsec_ctx->port_id;
> +	key.queue_id = ipsec_ctx->queue_id;
>
>  	key.cipher_algo = (uint8_t)sa->cipher_algo;
>  	key.auth_algo = (uint8_t)sa->auth_algo;
> @@ -72,12 +74,11 @@ create_lookaside_session(struct ipsec_ctx *ipsec_ctx,
> struct ipsec_sa *sa,
>  			(void **)&cdev_id_qp);
>  	if (ret < 0) {
>  		RTE_LOG(ERR, IPSEC,
> -				"No cryptodev: core %u, cipher_algo %u, "
> -				"auth_algo %u, aead_algo %u\n",
> -				key.lcore_id,
> -				key.cipher_algo,
> -				key.auth_algo,
> -				key.aead_algo);
> +				"No cryptodev: core %u, port_id %u, "
> +				"queue_id %u, cipher_algo %u, auth_algo %u, "
> +				"aead_algo %u\n",
> +				key.lcore_id, key.port_id, key.queue_id,
> +				key.cipher_algo, key.auth_algo, key.aead_algo);
>  		return -1;
>  	}
>
> diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
> index 8e07521..b76cb50 100644
> --- a/examples/ipsec-secgw/ipsec.h
> +++ b/examples/ipsec-secgw/ipsec.h
> @@ -180,6 +180,8 @@ struct cdev_qp {
>  	uint16_t qp;
>  	uint16_t in_flight;
>  	uint16_t len;
> +	uint16_t port_id;
> +	uint16_t port_queue_id;
>  	struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
>  };
>
> @@ -197,10 +199,15 @@ struct ipsec_ctx {
>  	uint16_t ol_pkts_cnt;
>  	uint64_t ipv4_offloads;
>  	uint64_t ipv6_offloads;
> +	/* port_id and queue_id are used to select crypto qp */
> +	uint16_t port_id;
> +	uint16_t queue_id;
>  };
>
>  struct cdev_key {
> -	uint16_t lcore_id;
> +	uint16_t port_id;
> +	uint8_t queue_id;
> +	uint8_t lcore_id;
>  	uint8_t cipher_algo;
>  	uint8_t auth_algo;
>  	uint8_t aead_algo;
> --
> 2.7.4
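To make the masking idea above a bit more concrete, here is a minimal, untested, standalone sketch. The names cdev_key_mask and apply_cdev_key_mask and the "legacy mask" selection are just placeholders I made up; in the real code the masking would have to happen inside add_mapping() and create_lookaside_session(), right before the rte_hash add/lookup calls, and the mask would be set from the new command-line option.

/*
 * Untested sketch only, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

/* Same layout as cdev_key in examples/ipsec-secgw/ipsec.h after this patch. */
struct cdev_key {
	uint16_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
	uint8_t cipher_algo;
	uint8_t auth_algo;
	uint8_t aead_algo;
};

/*
 * Mask selected by the (hypothetical) option: zeroed port_id/queue_id
 * reproduces the current per-lcore mapping, all-ones fields keep the
 * new per-port/queue mapping from this patch.
 */
static const struct cdev_key legacy_mask = {
	.port_id = 0, .queue_id = 0, .lcore_id = UINT8_MAX,
	.cipher_algo = UINT8_MAX, .auth_algo = UINT8_MAX,
	.aead_algo = UINT8_MAX,
};

static struct cdev_key cdev_key_mask;

/* Would be applied to the key right before rte_hash add/lookup. */
static void
apply_cdev_key_mask(struct cdev_key *key)
{
	key->port_id &= cdev_key_mask.port_id;
	key->queue_id &= cdev_key_mask.queue_id;
	key->lcore_id &= cdev_key_mask.lcore_id;
	key->cipher_algo &= cdev_key_mask.cipher_algo;
	key->auth_algo &= cdev_key_mask.auth_algo;
	key->aead_algo &= cdev_key_mask.aead_algo;
}

static int
cdev_key_equal(const struct cdev_key *a, const struct cdev_key *b)
{
	return a->port_id == b->port_id && a->queue_id == b->queue_id &&
		a->lcore_id == b->lcore_id &&
		a->cipher_algo == b->cipher_algo &&
		a->auth_algo == b->auth_algo && a->aead_algo == b->aead_algo;
}

int
main(void)
{
	/* Same lcore and algos, different port/queue. */
	struct cdev_key k1 = { .port_id = 0, .queue_id = 0, .lcore_id = 1,
			.cipher_algo = 3, .auth_algo = 2 };
	struct cdev_key k2 = { .port_id = 1, .queue_id = 5, .lcore_id = 1,
			.cipher_algo = 3, .auth_algo = 2 };

	cdev_key_mask = legacy_mask;	/* user did not request new mapping */
	apply_cdev_key_mask(&k1);
	apply_cdev_key_mask(&k2);

	/* With the legacy mask both keys hit the same cdev/qp mapping. */
	printf("keys collapse to the same mapping: %s\n",
		cdev_key_equal(&k1, &k2) ? "yes" : "no");
	return 0;
}

With an all-ones mask the behaviour of this patch is preserved, while the legacy mask degenerates the keys to the current per-lcore mapping, so both schemes could share the same hash table code with no extra branches on the datapath.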