On Tue, Aug 23, 2016 at 4:51 PM, Barry Spinney <spin...@mellanox.com> wrote:
> Both odp_pkt_queue.c and odp_traffic_mngr.c has similar bugs where
> an array was malloc'd but the valid indicies run from 1 to the number
> of entries malloc'd. This is because queue_num == 0 is used as a special
> value indicating an invalid queue_num. The value 0 is used for this
> same special purpose throughout the traffic mngr.
> The fix involved using "queue_num - 1" as the array index, but only
> after checking that queue_num is not 0. Also in a few cases a common
> function used to get and check the queue_num was used in a few more
> cases.

If the root issue is that 0 is being used as the invalid indicator, wouldn't
another (and potentially simpler) solution, as I believe Maxim suggested, be
to simply change the invalid distinguished value to 0xFFFFFFFF? That's what
linux-generic does for most of its ODP_xxx_INVALID symbols (and for this same
reason, to avoid indexing confusion).

>
> Signed-off-by: Barry Spinney <spin...@mellanox.com>
> ---
>  platform/linux-generic/odp_pkt_queue.c    | 12 ++++++------
>  platform/linux-generic/odp_traffic_mngr.c | 26 +++++++++++---------------
>  2 files changed, 17 insertions(+), 21 deletions(-)
>
> diff --git a/platform/linux-generic/odp_pkt_queue.c b/platform/linux-generic/odp_pkt_queue.c
> index 7734ee9..7c6cd87 100644
> --- a/platform/linux-generic/odp_pkt_queue.c
> +++ b/platform/linux-generic/odp_pkt_queue.c
> @@ -92,7 +92,7 @@ static int pkt_queue_free_list_add(queue_pool_t *pool,
>  	queue_blks_t *queue_blks;
>  	queue_blk_t *queue_blk;
>  	uint32_t which_region, blks_added, num_blks, start_idx;
> -	uint32_t malloc_len, blks_to_add, cnt;
> +	uint32_t malloc_len, blks_to_add, cnt, i;
>
>  	which_region = pool->current_region;
>  	blks_added = 0;
> @@ -102,7 +102,6 @@ static int pkt_queue_free_list_add(queue_pool_t *pool,
>  	num_blks = region_desc->num_blks;
>  	queue_blks = region_desc->queue_blks;
>  	if (!queue_blks) {
> -		uint32_t i;
>  		malloc_len = num_blks * sizeof(queue_blk_t);
>  		queue_blks = malloc(malloc_len);
>
> @@ -263,13 +262,13 @@ int _odp_pkt_queue_append(_odp_int_queue_pool_t queue_pool,
>  		return -3;
>
>  	pool->total_pkt_appends++;
> -	first_blk_idx = pool->queue_num_tbl[queue_num];
> +	first_blk_idx = pool->queue_num_tbl[queue_num - 1];
>  	if (first_blk_idx == 0) {
>  		first_blk = queue_blk_alloc(pool, &first_blk_idx);
>  		if (!first_blk)
>  			return -1;
>
> -		pool->queue_num_tbl[queue_num] = first_blk_idx;
> +		pool->queue_num_tbl[queue_num - 1] = first_blk_idx;
>  		init_queue_blk(first_blk);
>  		first_blk->pkts[0] = pkt;
>  		return 0;
> @@ -316,7 +315,7 @@ int _odp_pkt_queue_remove(_odp_int_queue_pool_t queue_pool,
>  	if ((queue_num == 0) || (pool->max_queue_num < queue_num))
>  		return -2;
>
> -	first_blk_idx = pool->queue_num_tbl[queue_num];
> +	first_blk_idx = pool->queue_num_tbl[queue_num - 1];
>  	if (first_blk_idx == 0)
>  		return 0; /* pkt queue is empty. */
>
> @@ -344,7 +343,8 @@ int _odp_pkt_queue_remove(_odp_int_queue_pool_t queue_pool,
>  					first_blk->tail_queue_blk_idx;
>  			}
>
> -			pool->queue_num_tbl[queue_num] = next_blk_idx;
> +			pool->queue_num_tbl[queue_num - 1] =
> +				next_blk_idx;
>  			queue_blk_free(pool, first_blk, first_blk_idx);
>  		}
>
> diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
> index a5271ed..4fe07ef 100644
> --- a/platform/linux-generic/odp_traffic_mngr.c
> +++ b/platform/linux-generic/odp_traffic_mngr.c
> @@ -109,7 +109,7 @@ static tm_queue_obj_t *get_tm_queue_obj(tm_system_t *tm_system,
>  		return NULL;
>
>  	queue_num = pkt_desc->queue_num;
> -	tm_queue_obj = tm_system->queue_num_tbl[queue_num];
> +	tm_queue_obj = tm_system->queue_num_tbl[queue_num - 1];
>  	return tm_queue_obj;
>  }
>
> @@ -1596,11 +1596,10 @@ static odp_bool_t tm_consume_sent_pkt(tm_system_t *tm_system,
>  	tm_queue_obj_t *tm_queue_obj;
>  	odp_packet_t pkt;
>  	pkt_desc_t *new_pkt_desc;
> -	uint32_t queue_num, pkt_len;
> +	uint32_t pkt_len;
>  	int rc;
>
> -	queue_num = sent_pkt_desc->queue_num;
> -	tm_queue_obj = tm_system->queue_num_tbl[queue_num];
> +	tm_queue_obj = get_tm_queue_obj(tm_system, sent_pkt_desc);
>  	if (!tm_queue_obj)
>  		return false;
>
> @@ -2088,15 +2087,11 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
>  	tm_queue_obj_t *tm_queue_obj;
>  	odp_packet_t odp_pkt;
>  	pkt_desc_t *pkt_desc;
> -	uint32_t cnt, queue_num;
> +	uint32_t cnt;
>
>  	for (cnt = 1; cnt <= max_sends; cnt++) {
>  		pkt_desc = &tm_system->egress_pkt_desc;
> -		queue_num = pkt_desc->queue_num;
> -		if (queue_num == 0)
> -			return;
> -
> -		tm_queue_obj = tm_system->queue_num_tbl[queue_num];
> +		tm_queue_obj = get_tm_queue_obj(tm_system, pkt_desc);
>  		if (!tm_queue_obj)
>  			return;
>
> @@ -2149,7 +2144,8 @@ static int tm_process_input_work_queue(tm_system_t *tm_system,
>  			return rc;
>  		}
>
> -		tm_queue_obj = tm_system->queue_num_tbl[work_item.queue_num];
> +		tm_queue_obj =
> +			tm_system->queue_num_tbl[work_item.queue_num - 1];
>  		pkt = work_item.pkt;
>  		if (!tm_queue_obj) {
>  			odp_packet_free(pkt);
> @@ -2204,7 +2200,7 @@ static int tm_process_expired_timers(tm_system_t *tm_system,
>
>  	queue_num = (timer_context & 0xFFFFFFFF) >> 4;
>  	timer_seq = timer_context >> 32;
> -	tm_queue_obj = tm_system->queue_num_tbl[queue_num];
> +	tm_queue_obj = tm_system->queue_num_tbl[queue_num - 1];
>  	if (!tm_queue_obj)
>  		return work_done;
>
> @@ -3904,7 +3900,7 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
>  	tm_queue_obj->tm_qentry.s.enqueue = queue_tm_reenq;
>  	tm_queue_obj->tm_qentry.s.enqueue_multi = queue_tm_reenq_multi;
>
> -	tm_system->queue_num_tbl[tm_queue_obj->queue_num] = tm_queue_obj;
> +	tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = tm_queue_obj;
>  	odp_ticketlock_lock(&tm_system->tm_system_lock);
>  	if (params->shaper_profile != ODP_TM_INVALID)
>  		tm_shaper_config_set(tm_system, params->shaper_profile,
> @@ -3972,7 +3968,7 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
>
>  	/* Now that all of the checks are done, time to so some freeing. */
>  	odp_ticketlock_lock(&tm_system->tm_system_lock);
> -	tm_system->queue_num_tbl[tm_queue_obj->queue_num] = NULL;
> +	tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = NULL;
>
>  	/* First delete any associated tm_wred_node and then the tm_queue_obj
>  	 * itself */
> @@ -4634,7 +4630,7 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
>
>  	max_queue_num = tm_system->next_queue_num;
>  	for (queue_num = 1; queue_num < max_queue_num; queue_num++) {
> -		tm_queue_obj = tm_system->queue_num_tbl[queue_num];
> +		tm_queue_obj = tm_system->queue_num_tbl[queue_num - 1];

This loop runs from 1 to max_queue_num - 1, but with this change doesn't this
mean that it's now running from 0 to max_queue_num - 2? Can the last element
of the array [max_queue_num - 1] ever be used? Shouldn't the test be changed
to queue_num <= max_queue_num?

>  		if (tm_queue_obj && tm_queue_obj->pkts_rcvd_cnt != 0)
>  			ODP_DBG("queue_num=%u priority=%u rcvd=%u enqueued=%u "
>  				"dequeued=%u consumed=%u\n",
> --
> 2.1.2
>
>
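To make the 0xFFFFFFFF suggestion concrete, here is a minimal standalone
sketch. This is not the actual ODP code: the table size and the names
NUM_QUEUES, QUEUE_NUM_INVALID, lookup_skewed() and lookup_direct() are made up
for illustration only. It just contrasts reserving queue_num 0 as the invalid
value (which forces the "queue_num - 1" indexing this patch adds) with using
an all-ones sentinel in the style of the linux-generic ODP_xxx_INVALID
handles, which lets queue numbers index the table directly:

    /* Illustrative sketch only -- not ODP code. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QUEUES 4

    /* Scheme used by this patch: 0 means "invalid queue_num", so valid
     * queue_nums run 1..NUM_QUEUES and must be shifted down by one
     * before indexing the table. */
    static uint32_t tbl_skewed[NUM_QUEUES];

    static uint32_t lookup_skewed(uint32_t queue_num)
    {
            if ((queue_num == 0) || (queue_num > NUM_QUEUES))
                    return 0;                     /* invalid queue_num */
            return tbl_skewed[queue_num - 1];     /* note the "- 1" */
    }

    /* Alternative scheme: an all-ones sentinel marks "invalid", so valid
     * queue_nums run 0..NUM_QUEUES-1 and index the table directly. */
    #define QUEUE_NUM_INVALID 0xFFFFFFFF

    static uint32_t tbl_direct[NUM_QUEUES];

    static uint32_t lookup_direct(uint32_t queue_num)
    {
            if (queue_num >= NUM_QUEUES)          /* also rejects the sentinel */
                    return 0;
            return tbl_direct[queue_num];         /* no index adjustment */
    }

    int main(void)
    {
            tbl_skewed[0] = 42;   /* entry for queue_num 1 in the first scheme */
            tbl_direct[0] = 42;   /* entry for queue_num 0 in the second scheme */

            printf("skewed:  %" PRIu32 "\n", lookup_skewed(1));
            printf("direct:  %" PRIu32 "\n", lookup_direct(0));
            printf("invalid: %" PRIu32 "\n", lookup_direct(QUEUE_NUM_INVALID));
            return 0;
    }

With the sentinel approach the odp_tm_stats_print() loop quoted above could
presumably iterate queue_num from 0 upwards and index the table directly,
which would sidestep the off-by-one question about its upper bound.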