From: Danylo Vodopianov <dvo-...@napatech.com>

Add PACKED virtqueue fields to struct nthw_virt_queue and extend the
managed virt-queue setup functions with packed ring configuration and
initialization support.

Signed-off-by: Danylo Vodopianov <dvo-...@napatech.com>
---
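Reviewer note: a packed-ring descriptor changes ownership based on how its
AVAIL/USED flag bits compare with the ring's wrap counter. A minimal sketch
of that rule, assuming the virtio 1.1 spec value for the USED bit (bit 15,
which this patch does not define):

    #define VIRTQ_DESC_F_USED (1 << 15)  /* spec value; not part of this patch */

    /*
     * A descriptor is available to the device when its AVAIL bit matches
     * the driver's avail_wrap_count while its USED bit does not
     * (virtio 1.1, sec. 2.7.1).
     */
    static inline int pvirtq_desc_is_avail(uint16_t flags, uint16_t wrap_count)
    {
            uint16_t avail = !!(flags & VIRTQ_DESC_F_AVAIL);
            uint16_t used = !!(flags & VIRTQ_DESC_F_USED);

            return avail == wrap_count && used != wrap_count;
    }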
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c | 191 +++++++++++++++++-
 drivers/net/ntnic/include/ntnic_virt_queue.h  |  27 +++
 2 files changed, 208 insertions(+), 10 deletions(-)
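The next_avail/next_used indices and their wrap counters added to struct
nthw_virt_queue follow the usual packed-ring convention: an index that
reaches queue_size wraps to 0 and toggles its wrap counter. A sketch of the
advance step (the helper name is hypothetical, not part of this patch):

    /* Advance the driver's avail index; toggle the wrap counter on wrap. */
    static inline void vq_advance_avail(struct nthw_virt_queue *vq)
    {
            if (++vq->next_avail >= vq->queue_size) {
                    vq->next_avail = 0;
                    vq->avail_wrap_count ^= 1;
            }
    }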

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index 5232a95eaa..46b4c4415c 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -69,16 +69,33 @@ enum nthw_virt_queue_usage {
 
 struct nthw_virt_queue {
        /* Pointers to virt-queue structs */
-       struct {
-               /* SPLIT virtqueue */
-               struct virtq_avail *p_avail;
-               struct virtq_used *p_used;
-               struct virtq_desc *p_desc;
-               /* Control variables for virt-queue structs */
-               uint16_t am_idx;
-               uint16_t used_idx;
-               uint16_t cached_idx;
-               uint16_t tx_descr_avail_idx;
+       union {
+               struct {
+                       /* SPLIT virtqueue */
+                       struct virtq_avail *p_avail;
+                       struct virtq_used *p_used;
+                       struct virtq_desc *p_desc;
+                       /* Control variables for virt-queue structs */
+                       uint16_t am_idx;
+                       uint16_t used_idx;
+                       uint16_t cached_idx;
+                       uint16_t tx_descr_avail_idx;
+               };
+               struct {
+                       /* PACKED virtqueue */
+                       struct pvirtq_event_suppress *driver_event;
+                       struct pvirtq_event_suppress *device_event;
+                       struct pvirtq_desc *desc;
+                       /*
+                        * When in-order, used Tx packets released by the FPGA
+                        * may collapse into a batch. When getting new Tx
+                        * buffers we may then only need part of a batch.
+                        */
+                       uint16_t next_avail;
+                       uint16_t next_used;
+                       uint16_t avail_wrap_count;
+                       uint16_t used_wrap_count;
+               };
        };
 
        /* Array with packet buffers */
@@ -108,6 +125,11 @@ struct nthw_virt_queue {
        void *desc_struct_phys_addr;
 };
 
+struct pvirtq_struct_layout_s {
+       size_t driver_event_offset;
+       size_t device_event_offset;
+};
+
 static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
 static struct nthw_virt_queue txvq[MAX_VIRT_QUEUES];
 
@@ -606,6 +628,143 @@ nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
        return &txvq[index];
 }
 
+/*
+ * Packed Ring
+ */
+static int nthw_setup_managed_virt_queue_packed(struct nthw_virt_queue *vq,
+       struct pvirtq_struct_layout_s *pvirtq_layout,
+       struct nthw_memory_descriptor *p_virt_struct_area,
+       struct nthw_memory_descriptor *p_packet_buffers,
+       uint16_t flags,
+       int rx)
+{
+       /* page aligned */
+       assert(((uintptr_t)p_virt_struct_area->phys_addr & 0xfff) == 0);
+       assert(p_packet_buffers);
+
+       /* clean canvas */
+       memset(p_virt_struct_area->virt_addr, 0,
+               sizeof(struct pvirtq_desc) * vq->queue_size +
+               sizeof(struct pvirtq_event_suppress) * 2 + sizeof(int) * vq->queue_size);
+
+       pvirtq_layout->device_event_offset = sizeof(struct pvirtq_desc) * vq->queue_size;
+       pvirtq_layout->driver_event_offset =
+               pvirtq_layout->device_event_offset + sizeof(struct pvirtq_event_suppress);
+
+       vq->desc = p_virt_struct_area->virt_addr;
+       vq->device_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->device_event_offset);
+       vq->driver_event = (void *)((uintptr_t)vq->desc + pvirtq_layout->driver_event_offset);
+
+       vq->next_avail = 0;
+       vq->next_used = 0;
+       vq->avail_wrap_count = 1;
+       vq->used_wrap_count = 1;
+
+       /*
+        * Only possible if FPGA always delivers in-order
+        * Buffer ID used is the index in the p_packet_buffers array
+        */
+       unsigned int i;
+       struct pvirtq_desc *p_desc = vq->desc;
+
+       for (i = 0; i < vq->queue_size; i++) {
+               if (rx) {
+                       p_desc[i].addr = (uint64_t)p_packet_buffers[i].phys_addr;
+                       p_desc[i].len = p_packet_buffers[i].len;
+               }
+
+               p_desc[i].id = i;
+               p_desc[i].flags = flags;
+       }
+
+       if (rx)
+               vq->avail_wrap_count ^= 1;      /* filled up available buffers for Rx */
+       else
+               vq->used_wrap_count ^= 1;       /* pre-fill free buffer IDs */
+
+       if (vq->queue_size == 0)
+               return -1;      /* don't allocate memory with size of 0 bytes */
+
+       vq->p_virtual_addr = malloc(vq->queue_size * sizeof(*p_packet_buffers));
+
+       if (vq->p_virtual_addr == NULL)
+               return -1;
+
+       memcpy(vq->p_virtual_addr, p_packet_buffers, vq->queue_size * sizeof(*p_packet_buffers));
+
+       /* Not used yet by FPGA - make sure we disable */
+       vq->device_event->flags = RING_EVENT_FLAGS_DISABLE;
+
+       return 0;
+}
+
+static struct nthw_virt_queue *
+nthw_setup_managed_rx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+       uint32_t index,
+       uint32_t queue_size,
+       uint32_t host_id,
+       uint32_t header,
+       struct nthw_memory_descriptor *p_virt_struct_area,
+       struct nthw_memory_descriptor *p_packet_buffers,
+       int irq_vector)
+{
+       struct pvirtq_struct_layout_s pvirtq_layout;
+       struct nthw_virt_queue *vq = &rxvq[index];
+       /* Set size and setup packed vq ring */
+       vq->queue_size = queue_size;
+
+       /* Use Avail flag bit == 1 because the wrap bit is initially set to 1 - Used is the inverse */
+       if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
+                       p_packet_buffers,
+                       VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_AVAIL, 1) != 0)
+               return NULL;
+
+       nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0x8000, 0,  /* start wrap ring counter as 1 */
+               (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+                       pvirtq_layout.driver_event_offset),
+               (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+                       pvirtq_layout.device_event_offset),
+               p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
+               header, PACKED_RING, irq_vector);
+
+       vq->usage = NTHW_VIRTQ_MANAGED;
+       return vq;
+}
+
+static struct nthw_virt_queue *
+nthw_setup_managed_tx_virt_queue_packed(nthw_dbs_t *p_nthw_dbs,
+       uint32_t index,
+       uint32_t queue_size,
+       uint32_t host_id,
+       uint32_t port,
+       uint32_t virtual_port,
+       uint32_t header,
+       int irq_vector,
+       uint32_t in_order,
+       struct nthw_memory_descriptor *p_virt_struct_area,
+       struct nthw_memory_descriptor *p_packet_buffers)
+{
+       struct pvirtq_struct_layout_s pvirtq_layout;
+       struct nthw_virt_queue *vq = &txvq[index];
+       /* Set size and setup packed vq ring */
+       vq->queue_size = queue_size;
+
+       if (nthw_setup_managed_virt_queue_packed(vq, &pvirtq_layout, p_virt_struct_area,
+                       p_packet_buffers, 0, 0) != 0)
+               return NULL;
+
+       nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0x8000, 0,  /* start wrap ring counter as 1 */
+               (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+                       pvirtq_layout.driver_event_offset),
+               (void *)((uintptr_t)p_virt_struct_area->phys_addr +
+                       pvirtq_layout.device_event_offset),
+               p_virt_struct_area->phys_addr, (uint16_t)queue_size, host_id,
+               port, virtual_port, header, PACKED_RING, irq_vector, in_order);
+
+       vq->usage = NTHW_VIRTQ_MANAGED;
+       return vq;
+}
+
 /*
  * Create a Managed Rx Virt Queue
  *
@@ -630,6 +789,11 @@ nthw_setup_mngd_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
                                host_id, header, p_virt_struct_area,
                                p_packet_buffers, irq_vector);
 
+       case PACKED_RING:
+               return nthw_setup_managed_rx_virt_queue_packed(p_nthw_dbs, index, queue_size,
+                               host_id, header, p_virt_struct_area,
+                               p_packet_buffers, irq_vector);
+
        default:
                break;
        }
@@ -666,6 +830,13 @@ nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
                                p_virt_struct_area,
                                p_packet_buffers);
 
+       case PACKED_RING:
+               return nthw_setup_managed_tx_virt_queue_packed(p_nthw_dbs, index, queue_size,
+                               host_id, port, virtual_port, header,
+                               irq_vector, in_order,
+                               p_virt_struct_area,
+                               p_packet_buffers);
+
        default:
                break;
        }
diff --git a/drivers/net/ntnic/include/ntnic_virt_queue.h b/drivers/net/ntnic/include/ntnic_virt_queue.h
index 97cb474dc8..d4c9a9835a 100644
--- a/drivers/net/ntnic/include/ntnic_virt_queue.h
+++ b/drivers/net/ntnic/include/ntnic_virt_queue.h
@@ -45,6 +45,9 @@ struct __rte_aligned(8) virtq_desc {
        uint16_t next;
 };
 
+/* additional packed ring flags */
+#define VIRTQ_DESC_F_AVAIL     (1 << 7)
+
 /* descr phys address must be 16 byte aligned */
 struct __rte_aligned(16) pvirtq_desc {
        /* Buffer Address. */
@@ -57,6 +60,30 @@ struct __rte_aligned(16) pvirtq_desc {
        uint16_t flags;
 };
 
+/* Disable events */
+#define RING_EVENT_FLAGS_DISABLE 0x1
+
+struct __rte_aligned(16) pvirtq_event_suppress {
+       union {
+               struct {
+                       /* Descriptor Ring Change Event Offset */
+                       uint16_t desc_event_off : 15;
+                       /* Descriptor Ring Change Event Wrap Counter */
+                       uint16_t desc_event_wrap : 1;
+               };
+               /* If desc_event_flags set to RING_EVENT_FLAGS_DESC */
+               uint16_t desc;
+       };
+
+       union {
+               struct {
+                       uint16_t desc_event_flags : 2;  /* Descriptor Ring Change Event Flags */
+                       uint16_t reserved : 14; /* Reserved, set to 0 */
+               };
+               uint16_t flags;
+       };
+};
+
 /*
  * Common virtq descr
  */
-- 
2.45.0
