Author: mw
Date: Tue Oct 31 12:41:07 2017
New Revision: 325236
URL: https://svnweb.freebsd.org/changeset/base/325236

Log:
  Update ena-com HAL to v1.1.4.3 and update driver accordingly
  
  The newest ena-com HAL supports LLQv2 and introduces
  API changes. In order not to break the driver compilation
  it was updated/fixed in the following way:
  
  * Change version of the driver to 0.8.0
  * Provide reset cause when triggering reset of the device
  * Reset device after attach fails
  * In the reset task free management irq after calling ena_down. Admin
    queue can still be used before ena_down is called, or when it is
    being handled
  * Do not reset device if ena_reset_task fails
  * Move call of the ena_com_dev_reset to the ena_down() routine - it
    should be called only if interface was up
  * Use different function for checking empty space on the sq ring
    (ena-com API change)
  * Fix typo on ENA_TX_CLEANUP_THRESHOLD
  * Replace checks for EPERM with checks for EOPNOTSUPP - change in the ena-com API
  * Minor style fixes
  
  Submitted by: Michal Krawczyk <m...@semihalf.com>
  Obtained from: Amazon.com, Inc.
                 Semihalf
  Sponsored by: Amazon.com, Inc.
  Differential Revision: https://reviews.freebsd.org/D12143

Added:
  head/sys/contrib/ena-com/ena_defs/
     - copied from r325234, vendor-sys/ena-com/dist/ena_defs/
Modified:
  head/sys/contrib/ena-com/ena_com.c
  head/sys/contrib/ena-com/ena_com.h
  head/sys/contrib/ena-com/ena_eth_com.c
  head/sys/contrib/ena-com/ena_eth_com.h
  head/sys/contrib/ena-com/ena_plat.h
  head/sys/dev/ena/ena.c
  head/sys/dev/ena/ena.h
  head/sys/dev/ena/ena_sysctl.c
Directory Properties:
  head/sys/contrib/ena-com/   (props changed)

Modified: head/sys/contrib/ena-com/ena_com.c
==============================================================================
--- head/sys/contrib/ena-com/ena_com.c  Tue Oct 31 12:23:02 2017        
(r325235)
+++ head/sys/contrib/ena-com/ena_com.c  Tue Oct 31 12:41:07 2017        
(r325236)
@@ -45,6 +45,13 @@
 #define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
 
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
                ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
                | (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -65,6 +72,10 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT        4
+
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -102,7 +113,7 @@ static inline int ena_com_mem_addr_set(struct ena_com_
        }
 
        ena_addr->mem_addr_low = (u32)addr;
-       ena_addr->mem_addr_high = (u64)addr >> 32;
+       ena_addr->mem_addr_high = (u16)((u64)addr >> 32);
 
        return 0;
 }
@@ -238,12 +249,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
        tail_masked = admin_queue->sq.tail & queue_size_mask;
 
        /* In case of queue FULL */
-       cnt = admin_queue->sq.tail - admin_queue->sq.head;
+       cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
-               ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
-                           admin_queue->sq.tail,
-                           admin_queue->sq.head,
-                           admin_queue->q_depth);
+               ena_trc_dbg("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
                return ERR_PTR(ENA_COM_NO_SPACE);
        }
@@ -278,6 +286,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd
        if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
                admin_queue->sq.phase = !admin_queue->sq.phase;
 
+       ENA_DB_SYNC(&admin_queue->sq.mem_handle);
        ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
                        admin_queue->sq.db_addr);
 
@@ -362,21 +371,43 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_
                                               io_sq->desc_addr.phys_addr,
                                               io_sq->desc_addr.mem_handle);
                }
-       } else {
+
+               if (!io_sq->desc_addr.virt_addr) {
+                       ena_trc_err("memory allocation failed");
+                       return ENA_COM_NO_MEM;
+               }
+       }
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Allocate bounce buffers */
+               io_sq->bounce_buf_ctrl.buffer_size = 
ena_dev->llq_info.desc_list_entry_size;
+               io_sq->bounce_buf_ctrl.buffers_num = 
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+               io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+               size = io_sq->bounce_buf_ctrl.buffer_size * 
io_sq->bounce_buf_ctrl.buffers_num;
+
                ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
                                   size,
-                                  io_sq->desc_addr.virt_addr,
+                                  io_sq->bounce_buf_ctrl.base_buffer,
                                   ctx->numa_node,
                                   dev_node);
-               if (!io_sq->desc_addr.virt_addr) {
-                       io_sq->desc_addr.virt_addr =
-                               ENA_MEM_ALLOC(ena_dev->dmadev, size);
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer = 
ENA_MEM_ALLOC(ena_dev->dmadev, size);
+
+               if (!io_sq->bounce_buf_ctrl.base_buffer) {
+                       ena_trc_err("bounce buffer memory allocation failed");
+                       return ENA_COM_NO_MEM;
                }
-       }
 
-       if (!io_sq->desc_addr.virt_addr) {
-               ena_trc_err("memory allocation failed");
-               return ENA_COM_NO_MEM;
+               memcpy(&io_sq->llq_info, &ena_dev->llq_info, 
sizeof(io_sq->llq_info));
+
+               /* Initiate the first bounce buffer */
+               io_sq->llq_buf_ctrl.curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, io_sq->llq_info.desc_list_entry_size);
+               io_sq->llq_buf_ctrl.descs_left_in_line =
+                       io_sq->llq_info.descs_num_before_header;
        }
 
        io_sq->tail = 0;
@@ -507,7 +538,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status
        case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
                return ENA_COM_NO_MEM;
        case ENA_ADMIN_UNSUPPORTED_OPCODE:
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        case ENA_ADMIN_BAD_OPCODE:
        case ENA_ADMIN_MALFORMED_REQUEST:
        case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -532,7 +563,7 @@ static int ena_com_wait_and_process_admin_cq_polling(s
                 ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
 
                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
-                    break;
+                       break;
 
                if (ENA_TIME_EXPIRE(timeout)) {
                        ena_trc_err("Wait for completion (polling) timeout\n");
@@ -567,6 +598,75 @@ err:
        return ret;
 }
 
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                                  struct ena_admin_feature_llq_desc *llq_desc)
+{
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+
+       memset(llq_info, 0, sizeof(*llq_info));
+
+       switch (llq_desc->header_location_ctrl) {
+       case ENA_ADMIN_INLINE_HEADER:
+               llq_info->inline_header = true;
+               break;
+       case ENA_ADMIN_HEADER_RING:
+               llq_info->inline_header = false;
+               break;
+       default:
+               ena_trc_err("Invalid header location control\n");
+               return -EINVAL;
+       }
+
+       switch (llq_desc->entry_size_ctrl) {
+       case ENA_ADMIN_LIST_ENTRY_SIZE_128B:
+               llq_info->desc_list_entry_size = 128;
+               break;
+       case ENA_ADMIN_LIST_ENTRY_SIZE_192B:
+               llq_info->desc_list_entry_size = 192;
+               break;
+       case ENA_ADMIN_LIST_ENTRY_SIZE_256B:
+               llq_info->desc_list_entry_size = 256;
+               break;
+       default:
+               ena_trc_err("Invalid entry_size_ctrl %d\n",
+                           llq_desc->entry_size_ctrl);
+               return -EINVAL;
+       }
+
+       if ((llq_info->desc_list_entry_size & 0x7)) {
+               /* The desc list entry size should be whole multiply of 8
+                * This requirement comes from __iowrite64_copy()
+                */
+               ena_trc_err("illegal entry size %d\n",
+                           llq_info->desc_list_entry_size);
+               return -EINVAL;
+       }
+
+       if (llq_info->inline_header) {
+               llq_info->desc_stride_ctrl = llq_desc->descriptors_stride_ctrl;
+               if ((llq_info->desc_stride_ctrl != 
ENA_ADMIN_SINGLE_DESC_PER_ENTRY) &&
+                   (llq_info->desc_stride_ctrl != 
ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)) {
+                       ena_trc_err("Invalid desc_stride_ctrl %d\n",
+                                   llq_info->desc_stride_ctrl);
+                       return -EINVAL;
+               }
+       } else {
+               llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+       }
+
+       if (llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+               llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+                       sizeof(struct ena_eth_io_tx_desc);
+       else
+               llq_info->descs_per_entry = 1;
+
+       llq_info->descs_num_before_header = 
llq_desc->desc_num_before_header_ctrl;
+
+       return 0;
+}
+
+
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx 
*comp_ctx,
                                                        struct 
ena_com_admin_queue *admin_queue)
 {
@@ -614,13 +714,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
-       u32 mmio_read_reg, timeout, ret;
+       u32 mmio_read_reg, ret, i;
        unsigned long flags;
-       int i;
+       u32 timeout = mmio_read->reg_read_to;
 
        ENA_MIGHT_SLEEP();
 
-       timeout = mmio_read->reg_read_to ? : ENA_REG_READ_TIMEOUT;
+       if (timeout == 0)
+               timeout = ENA_REG_READ_TIMEOUT;
 
        /* If readless is disabled, perform regular read */
        if (!mmio_read->readless_supported)
@@ -745,17 +846,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *
        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;
 
-               if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-                       ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
-                                             size,
-                                             io_sq->desc_addr.virt_addr,
-                                             io_sq->desc_addr.phys_addr,
-                                             io_sq->desc_addr.mem_handle);
-               else
-                       ENA_MEM_FREE(ena_dev->dmadev, 
io_sq->desc_addr.virt_addr);
+               ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
+                                     size,
+                                     io_sq->desc_addr.virt_addr,
+                                     io_sq->desc_addr.phys_addr,
+                                     io_sq->desc_addr.mem_handle);
 
                io_sq->desc_addr.virt_addr = NULL;
        }
+
+       if (io_sq->bounce_buf_ctrl.base_buffer) {
+               size = io_sq->llq_info.desc_list_entry_size * 
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+               ENA_MEM_FREE(ena_dev->dmadev, 
io_sq->bounce_buf_ctrl.base_buffer);
+               io_sq->bounce_buf_ctrl.base_buffer = NULL;
+       }
 }
 
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -807,7 +911,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *
 
        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
                ena_trc_dbg("Feature %d isn't supported\n", feature_id);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -1366,7 +1470,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_de
                ena_trc_warn("Trying to set unsupported aenq events. supported 
flag: %x asked flag: %x\n",
                             get_resp.u.aenq.supported_groups,
                             groups_flag);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        memset(&cmd, 0x0, sizeof(cmd));
@@ -1480,7 +1584,6 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 
        if (admin_queue->comp_ctx)
                ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
-
        admin_queue->comp_ctx = NULL;
        size = ADMIN_SQ_SIZE(admin_queue->q_depth);
        if (sq->entries)
@@ -1503,6 +1606,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+       u32 mask_value = 0;
+
+       if (polling)
+               mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+       ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + 
ENA_REGS_INTR_MASK_OFF);
        ena_dev->admin_queue.polling = polling;
 }
 
@@ -1790,11 +1899,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_
        if (!rc)
                memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
                       sizeof(get_resp.u.hw_hints));
-       else if (rc == ENA_COM_PERMISSION)
+       else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->hw_hints, 0x0, 
sizeof(get_feat_ctx->hw_hints));
        else
                return rc;
 
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+       if (!rc)
+               memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+                      sizeof(get_resp.u.llq));
+       else if (rc == ENA_COM_UNSUPPORTED)
+               memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+       else
+               return rc;
+
        return 0;
 }
 
@@ -1827,6 +1945,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq  = &dev->aenq;
        ena_aenq_handler handler_cb;
+       unsigned long long timestamp;
        u16 masked_head, processed = 0;
        u8 phase;
 
@@ -1838,11 +1957,12 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
        /* Go over all the events */
        while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
                phase) {
-               ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%jus]\n",
+               timestamp = (unsigned long long)aenq_common->timestamp_low |
+                       ((unsigned long long)aenq_common->timestamp_high << 32);
+               ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                            aenq_common->group,
                            aenq_common->syndrom,
-                           (u64)aenq_common->timestamp_low +
-                           ((u64)aenq_common->timestamp_high << 32));
+                           timestamp);
 
                /* Handle specific event*/
                handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -1872,8 +1992,30 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev
        mb();
        ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + 
ENA_REGS_AENQ_HEAD_DB_OFF);
 }
+#ifdef ENA_EXTENDED_STATS
+/*
+ * Sets the function Idx and Queue Idx to be used for
+ * get full statistics feature
+ *
+ */
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+                                         u32 func_queue)
+{
 
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+       /* Function & Queue is acquired from user in the following format :
+        * Bottom Half word:    funct
+        * Top Half Word:       queue
+        */
+       ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue);
+       ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue);
+
+       return 0;
+}
+
+#endif /* ENA_EXTENDED_STATS */
+
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+                     enum ena_regs_reset_reason_types reset_reason)
 {
        u32 stat, timeout, cap, reset_val;
        int rc;
@@ -1901,6 +2043,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
 
        /* start reset */
        reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
+       reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+                       ENA_REGS_DEV_CTL_RESET_REASON_MASK;
        ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + 
ENA_REGS_DEV_CTL_OFF);
 
        /* Write again the MMIO read request address */
@@ -1973,7 +2117,52 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *en
 
        return ret;
 }
+#ifdef ENA_EXTENDED_STATS
 
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+                                  u32 len)
+{
+       struct ena_com_stats_ctx ctx;
+       struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx.get_cmd;
+       ena_mem_handle_t mem_handle;
+       void *virt_addr;
+       dma_addr_t phys_addr;
+       int ret;
+
+       ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
+                              virt_addr, phys_addr, mem_handle);
+       if (!virt_addr) {
+               ret = ENA_COM_NO_MEM;
+               goto done;
+       }
+       memset(&ctx, 0x0, sizeof(ctx));
+       ret = ena_com_mem_addr_set(ena_dev,
+                                  &get_cmd->u.control_buffer.address,
+                                  phys_addr);
+       if (unlikely(ret)) {
+               ena_trc_err("memory address set failed\n");
+               return ret;
+       }
+       get_cmd->u.control_buffer.length = len;
+
+       get_cmd->device_id = ena_dev->stats_func;
+       get_cmd->queue_idx = ena_dev->stats_queue;
+
+       ret = ena_get_dev_stats(ena_dev, &ctx,
+                               ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
+       if (ret < 0)
+               goto free_ext_stats_mem;
+
+       ret = snprintf(buff, len, "%s", (char *)virt_addr);
+
+free_ext_stats_mem:
+       ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
+                             mem_handle);
+done:
+       return ret;
+}
+#endif
+
 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
 {
        struct ena_com_admin_queue *admin_queue;
@@ -1983,7 +2172,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, i
 
        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
                ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        memset(&cmd, 0x0, sizeof(cmd));
@@ -2037,7 +2226,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_
                                                ENA_ADMIN_RSS_HASH_FUNCTION)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_HASH_FUNCTION);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        /* Validate hash function is supported */
@@ -2049,7 +2238,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_
        if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
                ena_trc_err("Func hash %d isn't supported by device, abort\n",
                            rss->hash_func);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        memset(&cmd, 0x0, sizeof(cmd));
@@ -2108,7 +2297,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena
 
        if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
                ena_trc_err("Flow hash function %d isn't supported\n", func);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        switch (func) {
@@ -2201,7 +2390,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_HASH_INPUT);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        memset(&cmd, 0x0, sizeof(cmd));
@@ -2282,7 +2471,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *
                        ena_trc_err("hash control doesn't support all the 
desire configuration. proto %x supported %x selected %x\n",
                                    i, hash_ctrl->supported_fields[i].fields,
                                    hash_ctrl->selected_fields[i].fields);
-                       return ENA_COM_PERMISSION;
+                       return ENA_COM_UNSUPPORTED;
                }
        }
 
@@ -2360,7 +2549,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena
                                                
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
-               return ENA_COM_PERMISSION;
+               return ENA_COM_UNSUPPORTED;
        }
 
        ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2636,7 +2825,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_d
                                 ENA_ADMIN_INTERRUPT_MODERATION);
 
        if (rc) {
-               if (rc == ENA_COM_PERMISSION) {
+               if (rc == ENA_COM_UNSUPPORTED) {
                        ena_trc_dbg("Feature %d isn't supported\n",
                                    ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
@@ -2758,4 +2947,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_
        entry->pkts_per_interval =
        intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+}
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq)
+{
+       int rc;
+       int size;
+
+       if (llq->max_llq_num == 0) {
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               return 0;
+       }
+
+       rc = ena_com_config_llq_info(ena_dev, llq);
+       if (rc)
+               return rc;
+
+       /* Validate the descriptor is not too big */
+       size = ena_dev->tx_max_header_size;
+       size += ena_dev->llq_info.descs_num_before_header *
+               sizeof(struct ena_eth_io_tx_desc);
+
+       if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+               ena_trc_err("the size of the LLQ entry is smaller than 
needed\n");
+               return ENA_COM_INVAL;
+       }
+
+       ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+       return 0;
 }

Modified: head/sys/contrib/ena-com/ena_com.h
==============================================================================
--- head/sys/contrib/ena-com/ena_com.h  Tue Oct 31 12:23:02 2017        
(r325235)
+++ head/sys/contrib/ena-com/ena_com.h  Tue Oct 31 12:41:07 2017        
(r325236)
@@ -133,6 +133,15 @@ struct ena_com_tx_meta {
        u16 l4_hdr_len; /* In words */
 };
 
+struct ena_com_llq_info {
+       bool inline_header;
+       u16 desc_stride_ctrl;
+
+       u16 desc_list_entry_size;
+       u16 descs_num_before_header;
+       u16 descs_per_entry;
+};
+
 struct ena_com_io_cq {
        struct ena_com_io_desc_addr cdesc_addr;
        void *bus;
@@ -171,6 +180,20 @@ struct ena_com_io_cq {
 
 } ____cacheline_aligned;
 
+struct ena_com_io_bounce_buffer_control {
+       u8 *base_buffer;
+       u16 next_to_use;
+       u16 buffer_size;
+       u16 buffers_num;  /* Must be a power of 2 */
+};
+
+/* This struct is to keep tracking the current location of the next llq entry 
*/
+struct ena_com_llq_pkt_ctrl {
+       u8 *curr_bounce_buf;
+       u16 idx;
+       u16 descs_left_in_line;
+};
+
 struct ena_com_io_sq {
        struct ena_com_io_desc_addr desc_addr;
        void *bus;
@@ -183,6 +206,9 @@ struct ena_com_io_sq {
 
        u32 msix_vector;
        struct ena_com_tx_meta cached_tx_meta;
+       struct ena_com_llq_info llq_info;
+       struct ena_com_llq_pkt_ctrl llq_buf_ctrl;
+       struct ena_com_io_bounce_buffer_control bounce_buf_ctrl;
 
        u16 q_depth;
        u16 qid;
@@ -190,6 +216,7 @@ struct ena_com_io_sq {
        u16 idx;
        u16 tail;
        u16 next_to_comp;
+       u16 llq_last_copy_tail;
        u32 tx_max_header_size;
        u8 phase;
        u8 desc_entry_size;
@@ -321,6 +348,7 @@ struct ena_com_dev {
        void __iomem *mem_bar;
        void *dmadev;
        void *bus;
+
        enum ena_admin_placement_policy_type tx_mem_queue_type;
        u32 tx_max_header_size;
        u16 stats_func; /* Selected function for extended statistic dump */
@@ -337,6 +365,8 @@ struct ena_com_dev {
        u16 intr_delay_resolution;
        u32 intr_moder_tx_interval;
        struct ena_intr_moder_entry *intr_moder_tbl;
+
+       struct ena_com_llq_info llq_info;
 };
 
 struct ena_com_dev_get_features_ctx {
@@ -345,6 +375,7 @@ struct ena_com_dev_get_features_ctx {
        struct ena_admin_feature_aenq_desc aenq;
        struct ena_admin_feature_offload_desc offload;
        struct ena_admin_ena_hw_hints hw_hints;
+       struct ena_admin_feature_llq_desc llq;
 };
 
 struct ena_com_create_io_ctx {
@@ -426,10 +457,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev
 
 /* ena_com_dev_reset - Perform device FLR to the device.
  * @ena_dev: ENA communication layer struct
+ * @reset_reason: Specify what is the trigger for the reset in case of an 
error.
  *
  * @return - 0 on success, negative value on failure.
  */
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+                     enum ena_regs_reset_reason_types reset_reason);
 
 /* ena_com_create_io_queue - Create io queue.
  * @ena_dev: ENA communication layer struct
@@ -939,6 +972,15 @@ void ena_com_get_intr_moderation_entry(struct ena_com_
                                       enum ena_intr_moder_level level,
                                       struct ena_intr_moder_entry *entry);
 
+
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq: LLQ feature descriptor, retrieve via ena_com_get_dev_attr_feat.
+ *
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq);
+
 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev 
*ena_dev)
 {
        return ena_dev->adaptive_coalescing;
@@ -1048,6 +1090,30 @@ static inline void ena_com_update_intr_reg(struct ena_
                intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
 }
 
+static inline u8 *ena_com_get_next_bounce_buffer(struct 
ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+       u16 size, buffers_num;
+       u8 *buf;
+
+       size = bounce_buf_ctrl->buffer_size;
+       buffers_num = bounce_buf_ctrl->buffers_num;
+
+       buf = bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
+       prefetch(bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+       return buf;
+}
+
+#ifdef ENA_EXTENDED_STATS
+int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
+                                  u32 len);
+
+int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
+                                         u32 funct_queue);
+#endif
 #if defined(__cplusplus)
 }
 #endif /* __cplusplus */

Modified: head/sys/contrib/ena-com/ena_eth_com.c
==============================================================================
--- head/sys/contrib/ena-com/ena_eth_com.c      Tue Oct 31 12:23:02 2017        
(r325235)
+++ head/sys/contrib/ena-com/ena_eth_com.c      Tue Oct 31 12:41:07 2017        
(r325236)
@@ -64,7 +64,7 @@ static inline void ena_com_cq_inc_head(struct ena_com_
                io_cq->phase ^= 1;
 }
 
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
        u16 tail_masked;
        u32 offset;
@@ -76,22 +76,27 @@ static inline void *get_sq_desc(struct ena_com_io_sq *
        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq 
*io_sq)
+static inline void ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq 
*io_sq,
+                                                     u8 *bounce_buffer)
 {
-       u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-       u32 offset = tail_masked * io_sq->desc_entry_size;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 
-       /* In case this queue isn't a LLQ */
-       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-               return;
+       u16 dst_tail_mask;
+       u32 dst_offset;
 
-       memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
-                   io_sq->desc_addr.virt_addr + offset,
-                   io_sq->desc_entry_size);
-}
+       dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+       dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
 
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
+       /* Make sure everything was written into the bounce buffer before
+        * writing the bounce buffer to the device
+        */
+       wmb();
+
+       /* The line is completed. Copy it to dev */
+       ENA_MEMCPY_TO_DEVICE_64(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+                               bounce_buffer,
+                               llq_info->desc_list_entry_size);
+
        io_sq->tail++;
 
        /* Switch phase bit in case of wrap around */
@@ -99,26 +104,124 @@ static inline void ena_com_sq_update_tail(struct ena_c
                io_sq->phase ^= 1;
 }
 
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
-                                      u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+                                                u8 *header_src,
+                                                u16 header_len)
 {
-       u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-       u8 __iomem *dev_head_addr =
-               io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+       u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+       u16 header_offset;
 
        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
                return 0;
 
-       if (unlikely(!io_sq->header_addr)) {
-               ena_trc_err("Push buffer header ptr is NULL\n");
-               return ENA_COM_INVAL;
+       header_offset =
+               llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+       if (unlikely((header_offset + header_len) >  
llq_info->desc_list_entry_size)) {
+               ena_trc_err("trying to write header larger than llq entry can 
accommodate\n");
+               return ENA_COM_FAULT;
        }
 
-       memcpy_toio(dev_head_addr, head_src, header_len);
+       if (unlikely(!bounce_buffer)) {
+               ena_trc_err("bounce buffer is NULL\n");
+               return ENA_COM_FAULT;
+       }
 
+       memcpy(bounce_buffer + header_offset, header_src, header_len);
+
        return 0;
 }
 
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       u8 *bounce_buffer;
+       void *sq_desc;
+
+       bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+       if (unlikely(!bounce_buffer)) {
+               ena_trc_err("bounce buffer is NULL\n");
+               return NULL;
+       }
+
+       sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+       pkt_ctrl->idx++;
+       pkt_ctrl->descs_left_in_line--;
+
+       return sq_desc;
+}
+
+static inline void ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+               return;
+
+       /* bounce buffer was used, so write it and get a new one */
+       if (pkt_ctrl->idx) {
+               ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                  pkt_ctrl->curr_bounce_buf);
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+                       memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                              0x0, llq_info->desc_list_entry_size);
+       }
+
+       pkt_ctrl->idx = 0;
+       pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+               return get_sq_desc_llq(io_sq);
+
+       return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline void ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+
+       if (!pkt_ctrl->descs_left_in_line) {
+               ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                  pkt_ctrl->curr_bounce_buf);
+
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+                       memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                              0x0, llq_info->desc_list_entry_size);
+
+               pkt_ctrl->idx = 0;
+               if (llq_info->desc_stride_ctrl == 
ENA_ADMIN_SINGLE_DESC_PER_ENTRY)
+                       pkt_ctrl->descs_left_in_line = 1;
+               else
+                       pkt_ctrl->descs_left_in_line =
+                       llq_info->desc_list_entry_size / io_sq->desc_entry_size;
+       }
+}
+
+static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               ena_com_sq_update_llq_tail(io_sq);
+               return;
+       }
+
+       io_sq->tail++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+               io_sq->phase ^= 1;
+}
+
 static inline struct ena_eth_io_rx_cdesc_base *
        ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
 {
@@ -228,7 +331,6 @@ static inline void ena_com_create_and_store_tx_meta_de
        memcpy(&io_sq->cached_tx_meta, ena_meta,
               sizeof(struct ena_com_tx_meta));
 
-       ena_com_copy_curr_sq_desc_to_dev(io_sq);
        ena_com_sq_update_tail(io_sq);
 }
 
@@ -271,10 +373,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 {
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
-       void *push_header = ena_tx_ctx->push_header;
+       void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
-       int total_desc, i, rc;
+       u16 start_tail = io_sq->tail;
+       int i, rc;
        bool have_meta;
        u64 addr_hi;
 
@@ -282,7 +385,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                 "wrong Q type");
 
        /* num_bufs +1 for potential meta desc */
-       if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
+       if (!ena_com_sq_have_enough_space(io_sq, num_bufs + 1)) {
                ena_trc_err("Not enough space in the tx queue\n");
                return ENA_COM_NO_MEM;
        }
@@ -293,8 +396,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                return ENA_COM_INVAL;
        }
 
-       /* start with pushing the header (if needed) */
-       rc = ena_com_write_header(io_sq, push_header, header_len);
+       if (unlikely((io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) 
&& !buffer_to_push))
+               return ENA_COM_INVAL;
+
+       rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;
 
@@ -305,11 +410,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 
        /* If the caller doesn't want send packets */
        if (unlikely(!num_bufs && !header_len)) {
-               *nb_hw_desc = have_meta ? 0 : 1;
+               ena_com_close_bounce_buffer(io_sq);
+               *nb_hw_desc = io_sq->tail - start_tail;
                return 0;
        }
 
        desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return ENA_COM_FAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
        /* Set first desc when we don't have meta descriptor */
@@ -361,10 +469,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
        for (i = 0; i < num_bufs; i++) {
                /* The first desc share the same desc as the header */
                if (likely(i != 0)) {
-                       ena_com_copy_curr_sq_desc_to_dev(io_sq);
                        ena_com_sq_update_tail(io_sq);
 
                        desc = get_sq_desc(io_sq);
+                       if (unlikely(!desc))
+                               return ENA_COM_FAULT;
+
                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
                        desc->len_ctrl |= (io_sq->phase <<
@@ -387,14 +497,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
        /* set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
 
-       ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
        ena_com_sq_update_tail(io_sq);
 
-       total_desc = ENA_MAX16(num_bufs, 1);
-       total_desc += have_meta ? 1 : 0;
+       ena_com_close_bounce_buffer(io_sq);
 
-       *nb_hw_desc = total_desc;
+       *nb_hw_desc = io_sq->tail - start_tail;
        return 0;
 }
 
@@ -456,10 +563,13 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *i
        ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
                 "wrong Q type");
 
-       if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+       if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return ENA_COM_NO_SPACE;
 
        desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return ENA_COM_FAULT;
+
        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
 
        desc->length = ena_buf->len;
@@ -500,6 +610,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *i
        cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
        if (cdesc_phase != expected_phase)
                return ENA_COM_TRY_AGAIN;
+
+       if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+               ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+               return ENA_COM_INVAL;
+       }
 
        ena_com_cq_inc_head(io_cq);
 

Modified: head/sys/contrib/ena-com/ena_eth_com.h
==============================================================================
--- head/sys/contrib/ena-com/ena_eth_com.h      Tue Oct 31 12:23:02 2017        
(r325235)
+++ head/sys/contrib/ena-com/ena_eth_com.h      Tue Oct 31 12:41:07 2017        
(r325236)
@@ -98,7 +98,7 @@ static inline void ena_com_unmask_intr(struct ena_com_
        ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
 }
 
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
 {
        u16 tail, next_to_comp, cnt;
 
@@ -107,6 +107,25 @@ static inline int ena_com_sq_empty_space(struct ena_co
        cnt = tail - next_to_comp;
 
        return io_sq->q_depth - 1 - cnt;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"

Reply via email to