Hmm. Never mind.
Please ignore my previous comments on this patch.
The patch seems compliant with the spec, but the spec itself is not very clear.

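For what it's worth, my reading of the ownership rule the patch relies on (and
which the spec could state more explicitly) is the one encoded in desc_is_used():
a descriptor has been completed by the device once its AVAIL and USED bits are
equal and both match the driver's used wrap counter. A standalone sketch of that
check, with invented names and the bit positions from the spec rather than the
driver's own macros:

#include <stdbool.h>
#include <stdint.h>

#define PKD_DESC_F_AVAIL  (1u << 7)     /* VIRTQ_DESC_F_AVAIL in the spec */
#define PKD_DESC_F_USED   (1u << 15)    /* VIRTQ_DESC_F_USED in the spec */

/* True once the device has handed the descriptor back to the driver,
 * i.e. both bits equal the driver's used wrap counter. */
static inline bool
pkd_desc_is_used(uint16_t flags, bool used_wrap_counter)
{
        bool avail = !!(flags & PKD_DESC_F_AVAIL);
        bool used = !!(flags & PKD_DESC_F_USED);

        return avail == used && used == used_wrap_counter;
}
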
Best regards, Ilya Maximets.

On 23.01.2019 16:09, Ilya Maximets wrote:
> On 22.01.2019 20:01, Tiwei Bie wrote:
>> This patch mainly fixes the following issues in the packed ring based
>> control vq support in the virtio driver:
>>
>> 1. When parsing the used descriptors, we have to track the
>>    number of descs that we need to skip;
>> 2. vq->vq_free_cnt was decreased twice for the same desc;
>>
>> Meanwhile, make the function name consistent with other parts.
>>
>> Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
>> Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter")
>>
>> Signed-off-by: Tiwei Bie <tiwei....@intel.com>
>> ---
>>  drivers/net/virtio/virtio_ethdev.c | 62 ++++++++++++++----------------
>>  drivers/net/virtio/virtqueue.h     | 12 +-----
>>  2 files changed, 31 insertions(+), 43 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
>> index ee5a98b7c..a3fe65599 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
>>  struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
>>  
>>  static struct virtio_pmd_ctrl *
>> -virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>> -                   int *dlen, int pkt_num)
>> +virtio_send_command_packed(struct virtnet_ctl *cvq,
>> +                       struct virtio_pmd_ctrl *ctrl,
>> +                       int *dlen, int pkt_num)
>>  {
>>      struct virtqueue *vq = cvq->vq;
>>      int head;
>>      struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
>>      struct virtio_pmd_ctrl *result;
>> -    bool avail_wrap_counter, used_wrap_counter;
>> -    uint16_t flags;
>> +    bool avail_wrap_counter;
>>      int sum = 0;
>> +    int nb_descs = 0;
>>      int k;
>>  
>>      /*
>> @@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>>       */
>>      head = vq->vq_avail_idx;
>>      avail_wrap_counter = vq->avail_wrap_counter;
>> -    used_wrap_counter = vq->used_wrap_counter;
>> -    desc[head].flags = VRING_DESC_F_NEXT;
>>      desc[head].addr = cvq->virtio_net_hdr_mem;
>>      desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
>>      vq->vq_free_cnt--;
>> +    nb_descs++;
>>      if (++vq->vq_avail_idx >= vq->vq_nentries) {
>>              vq->vq_avail_idx -= vq->vq_nentries;
>>              vq->avail_wrap_counter ^= 1;
>> @@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>>                      + sizeof(struct virtio_net_ctrl_hdr)
>>                      + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
>>              desc[vq->vq_avail_idx].len = dlen[k];
>> -            flags = VRING_DESC_F_NEXT;
> 
> Looks like the barriers were badly placed here before this patch.
> Anyway, you need a write barrier here between the {addr, len} and flags updates.
> 
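(For the record only, since I'm retracting these comments: the ordering I meant,
sketched with the lines from the patch itself; just a sketch of where a
per-descriptor barrier would sit, not a tested change:)

        desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
                + sizeof(struct virtio_net_ctrl_hdr)
                + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
        desc[vq->vq_avail_idx].len = dlen[k];
        virtio_wmb(vq->hw->weak_barriers);      /* make addr/len visible first */
        desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
                VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
                VRING_DESC_F_USED(!vq->avail_wrap_counter);
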
>> +            desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
>> +                    VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
>> +                    VRING_DESC_F_USED(!vq->avail_wrap_counter);
>>              sum += dlen[k];
>>              vq->vq_free_cnt--;
>> -            flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
>> -                     VRING_DESC_F_USED(!vq->avail_wrap_counter);
>> -            desc[vq->vq_avail_idx].flags = flags;
>> -            rte_smp_wmb();
>> -            vq->vq_free_cnt--;
>> +            nb_descs++;
>>              if (++vq->vq_avail_idx >= vq->vq_nentries) {
>>                      vq->vq_avail_idx -= vq->vq_nentries;
>>                      vq->avail_wrap_counter ^= 1;
>>              }
>>      }
>>  
>> -
>>      desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
>>              + sizeof(struct virtio_net_ctrl_hdr);
>>      desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
>> -    flags = VRING_DESC_F_WRITE;
>> -    flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
>> -             VRING_DESC_F_USED(!vq->avail_wrap_counter);
>> -    desc[vq->vq_avail_idx].flags = flags;
>> -    flags = VRING_DESC_F_NEXT;
>> -    flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
>> -             VRING_DESC_F_USED(!avail_wrap_counter);
>> -    desc[head].flags = flags;
>> -    rte_smp_wmb();
>> -
> 
> Same here. We need a write barrier to be sure that {addr, len} are written
> before updating the flags.
> 
> Another way to avoid most of the barriers is to work similarly to
> 'flush_shadow_used_ring_packed',
> i.e. update all the data in a loop, then issue a single write barrier, then
> update all the flags.
> 
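(Again only for the record, the shape I had in mind, as a standalone sketch; the
struct, macros and function below are invented for the example and the sketch
ignores ring wrap-around for brevity:)

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>

/* Minimal stand-in for a packed-ring descriptor. */
struct pkd_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

#define PKD_F_NEXT      0x1
#define PKD_F_AVAIL(w)  ((uint16_t)(w) << 7)
#define PKD_F_USED(w)   ((uint16_t)(w) << 15)

/* Batched publish: write every addr/len first, then one write barrier,
 * then every flags word, head last, so the device never observes the
 * flags of a descriptor whose data fields are not yet globally visible. */
static void
pkd_publish_chain(struct pkd_desc *desc, uint16_t head, uint16_t n,
                  const uint64_t *addr, const uint32_t *len, bool wrap)
{
        uint16_t base = PKD_F_AVAIL(wrap) | PKD_F_USED(!wrap);
        uint16_t i;

        for (i = 0; i < n; i++) {
                desc[head + i].addr = addr[i];
                desc[head + i].len = len[i];
        }

        rte_smp_wmb();                  /* one barrier for the whole batch */

        for (i = 1; i < n; i++)
                desc[head + i].flags = base |
                        (i + 1 < n ? PKD_F_NEXT : 0);
        /* hand over the head last */
        desc[head].flags = base | (n > 1 ? PKD_F_NEXT : 0);
}
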
>> +    desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
>> +            VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
>> +            VRING_DESC_F_USED(!vq->avail_wrap_counter);
>>      vq->vq_free_cnt--;
>> +    nb_descs++;
>>      if (++vq->vq_avail_idx >= vq->vq_nentries) {
>>              vq->vq_avail_idx -= vq->vq_nentries;
>>              vq->avail_wrap_counter ^= 1;
>>      }
>>  
>> +    virtio_wmb(vq->hw->weak_barriers);
>> +    desc[head].flags = VRING_DESC_F_NEXT |
>> +            VRING_DESC_F_AVAIL(avail_wrap_counter) |
>> +            VRING_DESC_F_USED(!avail_wrap_counter);
>> +
>> +    virtio_wmb(vq->hw->weak_barriers);
>>      virtqueue_notify(vq);
>>  
>>      /* wait for used descriptors in virtqueue */
>> -    do {
>> -            rte_rmb();
>> +    while (!desc_is_used(&desc[head], vq))
>>              usleep(100);
>> -    } while (!__desc_is_used(&desc[head], used_wrap_counter));
>> +
>> +    virtio_rmb(vq->hw->weak_barriers);
>>  
>>      /* now get used descriptors */
>> -    while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
>> -            vq->vq_free_cnt++;
>> -            if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
>> -                    vq->vq_used_cons_idx -= vq->vq_nentries;
>> -                    vq->used_wrap_counter ^= 1;
>> -            }
>> +    vq->vq_free_cnt += nb_descs;
>> +    vq->vq_used_cons_idx += nb_descs;
>> +    if (vq->vq_used_cons_idx >= vq->vq_nentries) {
>> +            vq->vq_used_cons_idx -= vq->vq_nentries;
>> +            vq->used_wrap_counter ^= 1;
>>      }
>>  
>>      result = cvq->virtio_net_hdr_mz->addr;
>> @@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>>              sizeof(struct virtio_pmd_ctrl));
>>  
>>      if (vtpci_packed_queue(vq->hw)) {
>> -            result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
>> +            result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
>>              goto out_unlock;
>>      }
>>  
>> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
>> index 7fcde5643..ca9d8e6e3 100644
>> --- a/drivers/net/virtio/virtqueue.h
>> +++ b/drivers/net/virtio/virtqueue.h
>> @@ -281,7 +281,7 @@ struct virtio_tx_region {
>>  };
>>  
>>  static inline int
>> -__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
>> +desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
>>  {
>>      uint16_t used, avail, flags;
>>  
>> @@ -289,16 +289,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
>>      used = !!(flags & VRING_DESC_F_USED(1));
>>      avail = !!(flags & VRING_DESC_F_AVAIL(1));
>>  
>> -    return avail == used && used == wrap_counter;
>> +    return avail == used && used == vq->used_wrap_counter;
>>  }
>>  
>> -static inline int
>> -desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
>> -{
>> -    return __desc_is_used(desc, vq->used_wrap_counter);
>> -}
>> -
>> -
>>  static inline void
>>  vring_desc_init_packed(struct virtqueue *vq, int n)
>>  {
>> @@ -354,7 +347,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
>>  {
>>      uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
>>  
>> -
>>      if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
>>              virtio_wmb(vq->hw->weak_barriers);
>>              vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
>>
