On Tue, Jan 09, 2018 at 10:19:35AM -0600, Gage Eads wrote:
> This commit fixes a bug in which, when the sw PMD is reconfigured, it would
> leave stale IQ chunk pointers in each queue's IQ structure. Now, the PMD
> initializes all IQs at eventdev start time and releases all IQ chunk
> pointers at eventdev stop time (which has the consequence that any events
> in a queue when the eventdev is stopped will be lost). This approach should
> be resilient to any reconfiguration done between the stop and start, such
> as adding or removing queues.
>
> This commit also fixes two potential issues in iq_chunk.h. iq_init()
> now initializes the IQ's count field to 0, and iq_dequeue_burst() sets
> iq->head to the appropriate next pointer.
>
> Fixes: 5b5e476e59a4 ("event/sw: use dynamically-sized IQs")
> Signed-off-by: Gage Eads <gage.e...@intel.com>
> ---
>  drivers/event/sw/iq_chunk.h | 14 +++++++++-
>  drivers/event/sw/sw_evdev.c | 62 ++++++++++++++++++++++++++++++++-------------
>  2 files changed, 57 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/event/sw/iq_chunk.h b/drivers/event/sw/iq_chunk.h
> index 29f5a35..2fa724c 100644
> --- a/drivers/event/sw/iq_chunk.h
> +++ b/drivers/event/sw/iq_chunk.h
> @@ -73,12 +73,24 @@ iq_free_chunk(struct sw_evdev *sw, struct sw_queue_chunk *chunk)
>  }
>
>  static __rte_always_inline void
> +iq_free_chunk_list(struct sw_evdev *sw, struct sw_queue_chunk *head)
> +{
> +        while (head) {
> +                struct sw_queue_chunk *next;
> +                next = head->next;
> +                iq_free_chunk(sw, head);
> +                head = next;
> +        }
> +}
> +
> +static __rte_always_inline void
>  iq_init(struct sw_evdev *sw, struct sw_iq *iq)
>  {
>          iq->head = iq_alloc_chunk(sw);
>          iq->tail = iq->head;
>          iq->head_idx = 0;
>          iq->tail_idx = 0;
> +        iq->count = 0;
>  }
>
>  static __rte_always_inline void
> @@ -154,7 +166,7 @@ iq_dequeue_burst(struct sw_evdev *sw,
>
>  done:
>          if (unlikely(index == SW_EVS_PER_Q_CHUNK)) {
> -                struct sw_queue_chunk *next = iq->head->next;
> +                struct sw_queue_chunk *next = current->next;
>                  iq_free_chunk(sw, current);
>                  iq->head = next;
>                  iq->head_idx = 0;
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 1ef6340..7430a5d 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -245,9 +245,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
>          char buf[IQ_ROB_NAMESIZE];
>          struct sw_qid *qid = &sw->qids[idx];
>
> -        for (i = 0; i < SW_IQS_MAX; i++)
> -                iq_init(sw, &qid->iq[i]);
> -
>          /* Initialize the FID structures to no pinning (-1), and zero packets */
>          const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
>          for (i = 0; i < RTE_DIM(qid->fids); i++)
> @@ -325,11 +322,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
>          return 0;
>
>  cleanup:
> -        for (i = 0; i < SW_IQS_MAX; i++) {
> -                if (qid->iq[i].head)
> -                        iq_free_chunk(sw, qid->iq[i].head);
> -        }
> -
>          if (qid->reorder_buffer) {
>                  rte_free(qid->reorder_buffer);
>                  qid->reorder_buffer = NULL;
> @@ -348,13 +340,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
>  {
>          struct sw_evdev *sw = sw_pmd_priv(dev);
>          struct sw_qid *qid = &sw->qids[id];
> -        uint32_t i;
> -
> -        for (i = 0; i < SW_IQS_MAX; i++) {
> -                if (!qid->iq[i].head)
> -                        continue;
> -                iq_free_chunk(sw, qid->iq[i].head);
> -        }
>
>          if (qid->type == RTE_SCHED_TYPE_ORDERED) {
>                  rte_free(qid->reorder_buffer);
> @@ -388,6 +373,41 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
>  }
>
>  static void
> +sw_init_qid_iqs(struct sw_evdev *sw)
> +{
> +        int i, j;
> +
> +        /* Initialize the IQ memory of all configured qids */
> +        for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
> +                struct sw_qid *qid = &sw->qids[i];
> +
> +                if (!qid->initialized)
> +                        continue;
> +
> +                for (j = 0; j < SW_IQS_MAX; j++)
> +                        iq_init(sw, &qid->iq[j]);
> +        }
> +}
> +
> +static void
> +sw_clean_qid_iqs(struct sw_evdev *sw)
> +{
> +        int i, j;
> +
> +        /* Release the IQ memory of all configured qids */
> +        for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
> +                struct sw_qid *qid = &sw->qids[i];
> +
> +                for (j = 0; j < SW_IQS_MAX; j++) {
> +                        if (!qid->iq[j].head)
> +                                continue;
> +                        iq_free_chunk_list(sw, qid->iq[j].head);
> +                        qid->iq[j].head = NULL;
> +                }
> +        }
> +}
> +
> +static void
>  sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
>                  struct rte_event_queue_conf *conf)
> @@ -434,7 +454,10 @@ sw_dev_configure(const struct rte_eventdev *dev)
>          num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
>                          sw->qid_count*SW_IQS_MAX*2;
>
> -        /* If this is a reconfiguration, free the previous IQ allocation */
> +        /* If this is a reconfiguration, free the previous IQ allocation. All
> +         * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
> +         * will be reinitialized in sw_start().
> +         */
>          if (sw->chunks)
>                  rte_free(sw->chunks);
>
> @@ -667,8 +690,8 @@ sw_start(struct rte_eventdev *dev)
>
>          /* check all queues are configured and mapped to ports*/
>          for (i = 0; i < sw->qid_count; i++)
> -                if (sw->qids[i].iq[0].head == NULL ||
> -                                sw->qids[i].cq_num_mapped_cqs == 0) {
> +                if (!sw->qids[i].initialized ||
> +                                sw->qids[i].cq_num_mapped_cqs == 0) {
>                          SW_LOG_ERR("Queue %d not configured\n", i);
>                          return -ENOLINK;
>                  }
> @@ -689,6 +712,8 @@ sw_start(struct rte_eventdev *dev)
>                  }
>          }
>
> +        sw_init_qid_iqs(sw);
> +
>          if (sw_xstats_init(sw) < 0)
>                  return -EINVAL;
>
> @@ -702,6 +727,7 @@ static void
>  sw_stop(struct rte_eventdev *dev)
>  {
>          struct sw_evdev *sw = sw_pmd_priv(dev);
> +        sw_clean_qid_iqs(sw);
>          sw_xstats_uninit(sw);
>          sw->started = 0;
>          rte_smp_wmb();
> --
> 2.7.4
>
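To spell out the application-visible contract after this fix: any events still sitting in a queue when the eventdev is stopped are dropped, and queues can be added or removed between stop and start. Below is a minimal sketch of such a reconfiguration using the public eventdev API; it is not from the patch. The device, queue, and port IDs are illustrative, error handling is omitted, and in a real application every queue and port must be set up again after rte_event_dev_configure():

#include <rte_eventdev.h>

/* Hypothetical helper: grow a running eventdev by one queue. */
static void
add_queue_at_runtime(uint8_t dev_id, struct rte_event_dev_config *cfg)
{
        /* Stop: with this patch, the sw PMD frees every qid's IQ chunk
         * list here, dropping any events still queued.
         */
        rte_event_dev_stop(dev_id);

        /* Reconfigure with one more queue. sw_dev_configure() can now
         * free and reallocate the chunk pool safely because no stale
         * IQ chunk pointers remain in the qids.
         */
        cfg->nb_event_queues++;
        rte_event_dev_configure(dev_id, cfg);

        /* All queues and ports must be set up again after configure;
         * only the new queue is shown here.
         */
        uint8_t new_q = cfg->nb_event_queues - 1;
        struct rte_event_queue_conf qconf;
        rte_event_queue_default_conf_get(dev_id, new_q, &qconf);
        rte_event_queue_setup(dev_id, new_q, &qconf);

        /* Every queue must be mapped to at least one port, or
         * sw_start() fails with -ENOLINK.
         */
        rte_event_port_link(dev_id, 0 /* port */, &new_q, NULL, 1);

        /* Start: the sw PMD initializes the IQs of all configured
         * qids here.
         */
        rte_event_dev_start(dev_id);
}
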
Sw eventdev works well alongside Rx adapter now.

Cheers,
Pavan.

Acked-by: Pavan Nikhilesh <pbhagavat...@caviumnetworks.com>