> > >> Add application inbound/outbound worker thread and IPsec application
> > >> processing code for event mode.
> > >>
> > >> Example ipsec-secgw command in app mode:
> > >> ipsec-secgw -w 0002:02:00.0,ipsec_in_max_spi=128 -w
> > >> 0002:03:00.0,ipsec_in_max_spi=128 -w 0002:0e:00.0 -w 0002:10:00.1
> > >> --log-level=8 -c 0x1 -- -P -p 0x3 -u 0x1 --config "(1,0,0),(0,0,0)"
> > >> -f aes-gcm.cfg --transfer-mode event --event-schedule-type parallel
> > >>
> > >> Signed-off-by: Anoob Joseph <ano...@marvell.com>
> > >> Signed-off-by: Ankur Dwivedi <adwiv...@marvell.com>
> > >> Signed-off-by: Lukasz Bartosik <lbarto...@marvell.com>
> > >> ---
> > >
> > > ...
> > >
> > >> +static inline enum pkt_type
> > >> +process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
> > >> +{
> > >> +        struct rte_ether_hdr *eth;
> > >> +
> > >> +        eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > >> +        if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
> > >> +                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > >> +                                offsetof(struct ip, ip_p));
> > >> +                if (**nlp == IPPROTO_ESP)
> > >> +                        return PKT_TYPE_IPSEC_IPV4;
> > >> +                else
> > >> +                        return PKT_TYPE_PLAIN_IPV4;
> > >> +        } else if (eth->ether_type ==
> > >> +                        rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
> > >> +                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
> > >> +                                offsetof(struct ip6_hdr, ip6_nxt));
> > >> +                if (**nlp == IPPROTO_ESP)
> > >> +                        return PKT_TYPE_IPSEC_IPV6;
> > >> +                else
> > >> +                        return PKT_TYPE_PLAIN_IPV6;
> > >> +        }
> > >> +
> > >> +        /* Unknown/Unsupported type */
> > >> +        return PKT_TYPE_INVALID;
> > >> +}
> > >> +
> > >> +static inline void
> > >> +update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
> > >> +{
> > >> +        struct rte_ether_hdr *ethhdr;
> > >> +
> > >> +        ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
> > >> +        memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src,
> > >> +                        RTE_ETHER_ADDR_LEN);
> > >> +        memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst,
> > >> +                        RTE_ETHER_ADDR_LEN);
> > >> +}
> > >>
> > >>  static inline void
> > >>  ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
> > >> @@ -61,6 +101,290 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > >>          }
> > >>  }
> > >>
> > >> +static inline int
> > >> +check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
> > >> +{
> > >> +        uint32_t res;
> > >> +
> > >> +        if (unlikely(sp == NULL))
> > >> +                return 0;
> > >> +
> > >> +        rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
> > >> +                        DEFAULT_MAX_CATEGORIES);
> > >> +
> > >> +        if (unlikely(res == 0)) {
> > >> +                /* No match */
> > >> +                return 0;
> > >> +        }
> > >> +
> > >> +        if (res == DISCARD)
> > >> +                return 0;
> > >> +        else if (res == BYPASS) {
> > >> +                *sa_idx = -1;
> > >> +                return 1;
> > >> +        }
> > >> +
> > >> +        *sa_idx = res - 1;
> > >> +        return 1;
> > >> +}
> > >> +
> > >> +static inline uint16_t
> > >> +route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> > >> +{
> > >> +        uint32_t dst_ip;
> > >> +        uint16_t offset;
> > >> +        uint32_t hop;
> > >> +        int ret;
> > >> +
> > >> +        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
> > >> +        dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
> > >> +        dst_ip = rte_be_to_cpu_32(dst_ip);
> > >> +
> > >> +        ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
> > >> +
> > >> +        if (ret == 0) {
> > >> +                /* We have a hit */
> > >> +                return hop;
> > >> +        }
> > >> +
> > >> +        /* else */
> > >> +        return RTE_MAX_ETHPORTS;
> > >> +}
> > >> +
> > >> +/* TODO: To be tested */
> > >> +static inline uint16_t
> > >> +route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
> > >> +{
> > >> +        uint8_t dst_ip[16];
> > >> +        uint8_t *ip6_dst;
> > >> +        uint16_t offset;
> > >> +        uint32_t hop;
> > >> +        int ret;
> > >> +
> > >> +        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
> > >> +        ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
> > >> +        memcpy(&dst_ip[0], ip6_dst, 16);
> > >> +
> > >> +        ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
> > >> +
> > >> +        if (ret == 0) {
> > >> +                /* We have a hit */
> > >> +                return hop;
> > >> +        }
> > >> +
> > >> +        /* else */
> > >> +        return RTE_MAX_ETHPORTS;
> > >> +}
> > >> +
> > >> +static inline uint16_t
> > >> +get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
> > >> +{
> > >> +        if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
> > >> +                return route4_pkt(pkt, rt->rt4_ctx);
> > >> +        else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
> > >> +                return route6_pkt(pkt, rt->rt6_ctx);
> > >> +
> > >> +        return RTE_MAX_ETHPORTS;
> > >> +}
> > >
> > > Is it not possible to use the existing functions for finding routes,
> > > checking packet types and checking security policies?
> > > It will be very difficult to manage two separate functions for the same
> > > work. I can see that pkt->data_off is not required to be updated in the
> > > inline case, but can we split the existing functions in two so that they
> > > can be called in the appropriate cases?
> > >
> > > As you have said in the cover note as well, lookaside protocol support is
> > > to be added. I also tried adding it, and it gets very difficult to manage
> > > separate functions for separate code paths.
> > >
> >
> > [Lukasz] This was also Konstantin's comment during review of one of the
> > previous revisions.
> > prepare_one_packet() and prepare_tx_pkt() do much more than we need, and
> > for performance reasons we crafted new functions. For example,
> > process_ipsec_get_pkt_type() only returns nlp and whether the packet type
> > is plain or IPsec - that's all. prepare_one_packet() processes packets in
> > chunks and does much more: it adjusts the mbuf and packet length, then
> > demultiplexes packets into plain and IPsec flows, and finally does inline
> > checks. The same applies to update_mac_addrs() vs prepare_tx_pkt() and
> > check_sp() vs inbound_sp_sa(): prepare_tx_pkt() and inbound_sp_sa() do
> > more than we need in event mode.
> >
> > I understand your concern from the perspective of code maintenance, but on
> > the other hand we are concerned with performance.
> > The current code is not optimized to support the multiple processing modes
> > introduced with rte_security. We can work on common routines once the other
> > modes are also added, so that we can come up with a better solution than
> > what we have today.
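> >
> > Just to illustrate the direction (a rough, untested sketch; the helper
> > name and the exact demultiplexing are hypothetical), a future poll-mode
> > wrapper could reuse the same classifier and keep only the extra mbuf
> > handling it needs:
> >
> > static inline void
> > prepare_one_packet_poll(struct rte_mbuf *pkt, struct ipsec_traffic *t)
> > {
> >         uint8_t *nlp;
> >         enum pkt_type type;
> >
> >         /* Shared classification - same helper as the event path */
> >         type = process_ipsec_get_pkt_type(pkt, &nlp);
> >
> >         /* Poll mode additionally strips the L2 header */
> >         rte_pktmbuf_adj(pkt, RTE_ETHER_HDR_LEN);
> >
> >         switch (type) {
> >         case PKT_TYPE_PLAIN_IPV4:
> >                 t->ip4.data[t->ip4.num] = nlp;
> >                 t->ip4.pkts[(t->ip4.num)++] = pkt;
> >                 break;
> >         case PKT_TYPE_PLAIN_IPV6:
> >                 t->ip6.data[t->ip6.num] = nlp;
> >                 t->ip6.pkts[(t->ip6.num)++] = pkt;
> >                 break;
> >         case PKT_TYPE_IPSEC_IPV4:
> >         case PKT_TYPE_IPSEC_IPV6:
> >                 t->ipsec.pkts[(t->ipsec.num)++] = pkt;
> >                 break;
> >         default:
> >                 rte_pktmbuf_free(pkt);
> >                 break;
> >         }
> > }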
> >
> > >> +
> > >> +static inline int
> > >> +process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
> > >> +                struct rte_event *ev)
> > >> +{
> > >> +        struct ipsec_sa *sa = NULL;
> > >> +        struct rte_mbuf *pkt;
> > >> +        uint16_t port_id = 0;
> > >> +        enum pkt_type type;
> > >> +        uint32_t sa_idx;
> > >> +        uint8_t *nlp;
> > >> +
> > >> +        /* Get pkt from event */
> > >> +        pkt = ev->mbuf;
> > >> +
> > >> +        /* Check the packet type */
> > >> +        type = process_ipsec_get_pkt_type(pkt, &nlp);
> > >> +
> > >> +        switch (type) {
> > >> +        case PKT_TYPE_PLAIN_IPV4:
> > >> +                if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > >> +                        if (unlikely(pkt->ol_flags &
> > >> +                                     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > >> +                                RTE_LOG(ERR, IPSEC,
> > >> +                                        "Inbound security offload failed\n");
> > >> +                                goto drop_pkt_and_exit;
> > >> +                        }
> > >> +                        sa = pkt->userdata;
> > >> +                }
> > >> +
> > >> +                /* Check if we have a match */
> > >> +                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > >> +                        /* No valid match */
> > >> +                        goto drop_pkt_and_exit;
> > >> +                }
> > >> +                break;
> > >> +
> > >> +        case PKT_TYPE_PLAIN_IPV6:
> > >> +                if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
> > >> +                        if (unlikely(pkt->ol_flags &
> > >> +                                     PKT_RX_SEC_OFFLOAD_FAILED)) {
> > >> +                                RTE_LOG(ERR, IPSEC,
> > >> +                                        "Inbound security offload failed\n");
> > >> +                                goto drop_pkt_and_exit;
> > >> +                        }
> > >> +                        sa = pkt->userdata;
> > >> +                }
> > >> +
> > >> +                /* Check if we have a match */
> > >> +                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > >> +                        /* No valid match */
> > >> +                        goto drop_pkt_and_exit;
> > >> +                }
> > >> +                break;
> > >> +
> > >> +        default:
> > >> +                RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> > >> +                goto drop_pkt_and_exit;
> > >> +        }
> > >> +
> > >> +        /* Check if the packet has to be bypassed */
> > >> +        if (sa_idx == BYPASS)
> > >> +                goto route_and_send_pkt;
> > >> +
> > >> +        /* Validate sa_idx */
> > >> +        if (sa_idx >= ctx->sa_ctx->nb_sa)
> > >> +                goto drop_pkt_and_exit;
> > >> +
> > >> +        /* Else the packet has to be protected with SA */
> > >> +
> > >> +        /* If the packet was IPsec processed, then SA pointer should be set */
> > >> +        if (sa == NULL)
> > >> +                goto drop_pkt_and_exit;
> > >> +
> > >> +        /* SPI on the packet should match with the one in SA */
> > >> +        if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
> > >> +                goto drop_pkt_and_exit;
> > >> +
> > >> +route_and_send_pkt:
> > >> +        port_id = get_route(pkt, rt, type);
> > >> +        if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > >> +                /* no match */
> > >> +                goto drop_pkt_and_exit;
> > >> +        }
> > >> +        /* else, we have a matching route */
> > >> +
> > >> +        /* Update mac addresses */
> > >> +        update_mac_addrs(pkt, port_id);
> > >> +
> > >> +        /* Update the event with the dest port */
> > >> +        ipsec_event_pre_forward(pkt, port_id);
> > >> +        return 1;
> > >> +
> > >> +drop_pkt_and_exit:
> > >> +        RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
> > >> +        rte_pktmbuf_free(pkt);
> > >> +        ev->mbuf = NULL;
> > >> +        return 0;
> > >> +}
> > >> +
> > >> +static inline int
> > >> +process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
> > >> +                struct rte_event *ev)
> > >> +{
> > >> +        struct rte_ipsec_session *sess;
> > >> +        struct sa_ctx *sa_ctx;
> > >> +        struct rte_mbuf *pkt;
> > >> +        uint16_t port_id = 0;
> > >> +        struct ipsec_sa *sa;
> > >> +        enum pkt_type type;
> > >> +        uint32_t sa_idx;
> > >> +        uint8_t *nlp;
> > >> +
> > >> +        /* Get pkt from event */
> > >> +        pkt = ev->mbuf;
> > >> +
> > >> +        /* Check the packet type */
> > >> +        type = process_ipsec_get_pkt_type(pkt, &nlp);
> > >> +
> > >> +        switch (type) {
> > >> +        case PKT_TYPE_PLAIN_IPV4:
> > >> +                /* Check if we have a match */
> > >> +                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
> > >> +                        /* No valid match */
> > >> +                        goto drop_pkt_and_exit;
> > >> +                }
> > >> +                break;
> > >> +        case PKT_TYPE_PLAIN_IPV6:
> > >> +                /* Check if we have a match */
> > >> +                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
> > >> +                        /* No valid match */
> > >> +                        goto drop_pkt_and_exit;
> > >> +                }
> > >> +                break;
> > >> +        default:
> > >> +                /*
> > >> +                 * Only plain IPv4 & IPv6 packets are allowed
> > >> +                 * on protected port. Drop the rest.
> > >> +                 */
> > >> +                RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
> > >> +                goto drop_pkt_and_exit;
> > >> +        }
> > >> +
> > >> +        /* Check if the packet has to be bypassed */
> > >> +        if (sa_idx == BYPASS) {
> > >> +                port_id = get_route(pkt, rt, type);
> > >> +                if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
> > >> +                        /* no match */
> > >> +                        goto drop_pkt_and_exit;
> > >> +                }
> > >> +                /* else, we have a matching route */
> > >> +                goto send_pkt;
> > >> +        }
> > >> +
> > >> +        /* Validate sa_idx */
> > >> +        if (sa_idx >= ctx->sa_ctx->nb_sa)
> > >> +                goto drop_pkt_and_exit;
> > >> +
> > >> +        /* Else the packet has to be protected */
> > >> +
> > >> +        /* Get SA ctx*/
> > >> +        sa_ctx = ctx->sa_ctx;
> > >> +
> > >> +        /* Get SA */
> > >> +        sa = &(sa_ctx->sa[sa_idx]);
> > >> +
> > >> +        /* Get IPsec session */
> > >> +        sess = ipsec_get_primary_session(sa);
> > >> +
> > >> +        /* Allow only inline protocol for now */
> > >> +        if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
> > >> +                RTE_LOG(ERR, IPSEC, "SA type not supported\n");
> > >> +                goto drop_pkt_and_exit;
> > >> +        }
> > >> +
> > >> +        if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
> > >> +                pkt->userdata = sess->security.ses;
> > >> +
> > >> +        /* Mark the packet for Tx security offload */
> > >> +        pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> > >> +
> > >> +        /* Get the port to which this pkt need to be submitted */
> > >> +        port_id = sa->portid;
> > >> +
> > >> +send_pkt:
> > >> +        /* Update mac addresses */
> > >> +        update_mac_addrs(pkt, port_id);
> > >> +
> > >> +        /* Update the event with the dest port */
> > >> +        ipsec_event_pre_forward(pkt, port_id);
> > >
> > > How is the IP checksum getting updated for the processed packet?
> > > If the hardware is not updating it, should we add a fallback mechanism
> > > for SW-based checksum update?
> > >
> >
> > [Lukasz] In the case of outbound inline protocol, the checksum has to be
> > calculated by HW, as the final packet is formed by the crypto device.
> > There is no need to calculate it in SW.
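> >
> > Should a SW fallback ever become necessary (e.g. once lookaside modes are
> > added), a minimal sketch could be to recompute the IPv4 header checksum
> > just before Tx (helper name hypothetical, not part of this patch):
> >
> > static inline void
> > fix_ipv4_cksum(struct rte_mbuf *pkt)
> > {
> >         struct rte_ipv4_hdr *ip;
> >
> >         ip = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
> >                         RTE_ETHER_HDR_LEN);
> >         ip->hdr_checksum = 0;
> >         ip->hdr_checksum = rte_ipv4_cksum(ip);
> > }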
> >
> > >> +        return 1;
> > >
> > > It will be better to use some macros for the return values, like:
> > > #define PKT_FORWARD      1
> > > #define PKT_DROPPED      0
> > > #define PKT_POSTED       2  /* maybe for lookaside cases */
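> > >
> > > The return sites would then read along these lines (sketch):
> > >
> > >         /* Update the event with the dest port */
> > >         ipsec_event_pre_forward(pkt, port_id);
> > >         return PKT_FORWARD;
> > >
> > > drop_pkt_and_exit:
> > >         RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> > >         rte_pktmbuf_free(pkt);
> > >         ev->mbuf = NULL;
> > >         return PKT_DROPPED;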
> > >
> > >> +
> > >> +drop_pkt_and_exit:
> > >> +        RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
> > >> +        rte_pktmbuf_free(pkt);
> > >> +        ev->mbuf = NULL;
> > >> +        return 0;
> > >> +}
> > >> +
> > >>  /*
> > >>   * Event mode exposes various operating modes depending on the
> > >>   * capabilities of the event device and the operating mode
> > >> @@ -68,7 +392,7 @@ prepare_out_sessions_tbl(struct sa_ctx *sa_out,
> > >>   */
> > >>
> > >>  /* Workers registered */
> > >> -#define IPSEC_EVENTMODE_WORKERS         1
> > >> +#define IPSEC_EVENTMODE_WORKERS         2
> > >>
> > >>  /*
> > >>   * Event mode worker
> > >> @@ -146,7 +470,7 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
> > >>                          }
> > >>
> > >>                          /* Save security session */
> > >> -                        pkt->udata64 = (uint64_t) sess_tbl[port_id];
> > >> +                        pkt->userdata = sess_tbl[port_id];
> > >>
> > >>                          /* Mark the packet for Tx security offload */
> > >>                          pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
> > >> @@ -165,6 +489,94 @@ ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
> > >>          }
> > >>  }
> > >>
> > >> +/*
> > >> + * Event mode worker
> > >> + * Operating parameters : non-burst - Tx internal port - app mode
> > >> + */
> > >> +static void
> > >> +ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
> > >> +                uint8_t nb_links)
> > >> +{
> > >> +        struct lcore_conf_ev_tx_int_port_wrkr lconf;
> > >> +        unsigned int nb_rx = 0;
> > >> +        struct rte_event ev;
> > >> +        uint32_t lcore_id;
> > >> +        int32_t socket_id;
> > >> +        int ret;
> > >> +
> > >> +        /* Check if we have links registered for this lcore */
> > >> +        if (nb_links == 0) {
> > >> +                /* No links registered - exit */
> > >> +                return;
> > >> +        }
> > >> +
> > >> +        /* We have valid links */
> > >> +
> > >> +        /* Get core ID */
> > >> +        lcore_id = rte_lcore_id();
> > >> +
> > >> +        /* Get socket ID */
> > >> +        socket_id = rte_lcore_to_socket_id(lcore_id);
> > >> +
> > >> +        /* Save routing table */
> > >> +        lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
> > >> +        lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
> > >> +        lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
> > >> +        lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
> > >> +        lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
> > >> +        lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
> > >
> > > Session_priv_pool should also be added for both inbound and outbound
> > >
> >
> > [Lukasz] I will add it in V5.
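> >
> > Presumably something along these lines (assuming socket_ctx carries
> > session_priv_pool, as it does for the poll path):
> >
> >         lconf.inbound.session_priv_pool =
> >                         socket_ctx[socket_id].session_priv_pool;
> >         lconf.outbound.session_priv_pool =
> >                         socket_ctx[socket_id].session_priv_pool;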
> 
> [Anoob] Actually, why do we need both session_pool and private_pool? I think
> it's a remnant from the time when the session was created when the first
> packet arrived.
> 
> @Konstantin, thoughts?

I think we do need it for lksd sessions.
See create_lookaside_session() in ipsec.c
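From memory it is roughly (paraphrased, not the exact code):

        sess = rte_cryptodev_sym_session_create(ipsec_ctx->session_pool);
        rte_cryptodev_sym_session_init(cdev_id, sess, sa->xforms,
                        ipsec_ctx->session_priv_pool);

i.e. the session itself comes from session_pool, while the per-device
private data comes from session_priv_pool.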

> 
> >
> > >> +        lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
> > >> +        lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
> > >> +        lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
> > >> +        lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
> > >> +
> > >> +        RTE_LOG(INFO, IPSEC,
> > >> +                "Launching event mode worker (non-burst - Tx internal port - "
> > >> +                "app mode) on lcore %d\n", lcore_id);
> > >> +
> > >> +        /* Check if it's single link */
> > >> +        if (nb_links != 1) {
> > >> +                RTE_LOG(INFO, IPSEC,
> > >> +                        "Multiple links not supported. Using first link\n");
> > >> +        }
> > >> +
> > >> +        RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
> > >> +                links[0].event_port_id);
> > >> +
> > >> +        while (!force_quit) {
> > >> +                /* Read packet from event queues */
> > >> +                nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
> > >> +                                links[0].event_port_id,
> > >> +                                &ev,     /* events */
> > >> +                                1,       /* nb_events */
> > >> +                                0        /* timeout_ticks */);
> > >> +
> > >> +                if (nb_rx == 0)
> > >> +                        continue;
> > >> +
> > >
> > > Event type should be checked here before dereferencing it.
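> > > Something like (assuming only RTE_EVENT_TYPE_ETHDEV events are expected
> > > at this point):
> > >
> > >         if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
> > >                 RTE_LOG(ERR, IPSEC, "Unsupported event type\n");
> > >                 continue;
> > >         }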
> > >
> >
> > [Lukasz] I will add event type check in V5.
> >
> > >> +                if (is_unprotected_port(ev.mbuf->port))
> > >> +                        ret = process_ipsec_ev_inbound(&lconf.inbound,
> > >> +                                                        &lconf.rt, &ev);
> > >> +                else
> > >> +                        ret = process_ipsec_ev_outbound(&lconf.outbound,
> > >> +                                                        &lconf.rt, &ev);
> > >> +                if (ret != 1)
> > >> +                        /* The pkt has been dropped */
> > >> +                        continue;
> > >> +
> > >> +                /*
> > >> +                 * Since tx internal port is available, events can be
> > >> +                 * directly enqueued to the adapter and it would be
> > >> +                 * internally submitted to the eth device.
> > >> +                 */
> > >> +                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
> > >> +                                links[0].event_port_id,
> > >> +                                &ev,    /* events */
> > >> +                                1,      /* nb_events */
> > >> +                                0       /* flags */);
> > >> +        }
> > >> +}
> > >> +
> > >>  static uint8_t
> > >>  ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
> > >>  {
> > >> @@ -180,6 +592,14 @@ ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
> > >>          wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
> > >>          wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
> > >>          wrkr++;
> > >> +        nb_wrkr_param++;
> > >> +
> > >> +        /* Non-burst - Tx internal port - app mode */
> > >> +        wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
> > >> +        wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
> > >> +        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
> > >> +        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
> > >> +        nb_wrkr_param++;
> > >>
> > >>          return nb_wrkr_param;
> > >>  }
> > >> diff --git a/examples/ipsec-secgw/ipsec_worker.h b/examples/ipsec-secgw/ipsec_worker.h
> > >> new file mode 100644
> > >> index 0000000..87b4f22
> > >> --- /dev/null
> > >> +++ b/examples/ipsec-secgw/ipsec_worker.h
> > >> @@ -0,0 +1,35 @@
> > >> +/* SPDX-License-Identifier: BSD-3-Clause
> > >> + * Copyright (C) 2020 Marvell International Ltd.
> > >> + */
> > >> +#ifndef _IPSEC_WORKER_H_
> > >> +#define _IPSEC_WORKER_H_
> > >> +
> > >> +#include "ipsec.h"
> > >> +
> > >> +enum pkt_type {
> > >> +        PKT_TYPE_PLAIN_IPV4 = 1,
> > >> +        PKT_TYPE_IPSEC_IPV4,
> > >> +        PKT_TYPE_PLAIN_IPV6,
> > >> +        PKT_TYPE_IPSEC_IPV6,
> > >> +        PKT_TYPE_INVALID
> > >> +};
> > >> +
> > >> +struct route_table {
> > >> +        struct rt_ctx *rt4_ctx;
> > >> +        struct rt_ctx *rt6_ctx;
> > >> +};
> > >> +
> > >> +/*
> > >> + * Conf required by event mode worker with tx internal port
> > >> + */
> > >> +struct lcore_conf_ev_tx_int_port_wrkr {
> > >> +        struct ipsec_ctx inbound;
> > >> +        struct ipsec_ctx outbound;
> > >> +        struct route_table rt;
> > >> +} __rte_cache_aligned;
> > >> +
> > >> +void ipsec_poll_mode_worker(void);
> > >> +
> > >> +int ipsec_launch_one_lcore(void *args);
> > >> +
> > >> +#endif /* _IPSEC_WORKER_H_ */
> > >> --
> > >> 2.7.4
> > >
