Since the auxiliary structure is associated with each rule, it can be
allocated within the same ipool allocation to save the extra overhead
of the *alloc header and the unneeded CPU cycles.
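
The point is that one ipool element now holds the rte_flow_hw, the
rte_flow_nt2hws and the rte_flow_hw_aux parts back to back, and the
latter two are located by pointer arithmetic instead of a separate
mlx5_malloc(). Below is a minimal standalone sketch of that layout,
with stand-in struct bodies and plain calloc() in place of the ipool
allocator, not the driver code itself:

/*
 * Stand-in sketch only: simplified struct bodies and calloc() instead
 * of the mlx5 ipool allocator, just to illustrate the single-allocation
 * layout used by this patch.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rte_flow_hw { uint32_t idx; };           /* stand-in body */
struct rte_flow_hw_aux { uint32_t cnt_id; };    /* stand-in body */
struct rte_flow_nt2hws { struct rte_flow_hw_aux *flow_aux; };

int main(void)
{
        /* One allocation carries all three parts back to back. */
        struct rte_flow_hw *flow = calloc(1, sizeof(struct rte_flow_hw) +
                                             sizeof(struct rte_flow_nt2hws) +
                                             sizeof(struct rte_flow_hw_aux));
        if (flow == NULL)
                return 1;
        /* nt2hws starts right after the rte_flow_hw part, ... */
        struct rte_flow_nt2hws *nt2hws = (struct rte_flow_nt2hws *)
                ((uintptr_t)flow + sizeof(struct rte_flow_hw));
        /* ... and the auxiliary data right after nt2hws. */
        nt2hws->flow_aux = (struct rte_flow_hw_aux *)
                ((uintptr_t)nt2hws + sizeof(struct rte_flow_nt2hws));
        printf("flow %p, nt2hws %p, aux %p\n",
               (void *)flow, (void *)nt2hws, (void *)nt2hws->flow_aux);
        free(flow);
        return 0;
}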

Fixes: 27d171b88031 ("net/mlx5: abstract flow action and enable reconfigure")
Cc: [email protected]

Signed-off-by: Bing Zhao <[email protected]>
---
 drivers/net/mlx5/linux/mlx5_os.c | 7 ++++---
 drivers/net/mlx5/mlx5_flow_hw.c  | 9 ++++-----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 099ab65254..21387346b3 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1643,16 +1643,17 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        /* Read link status in case it is up and there will be no event. */
        mlx5_link_update(eth_dev, 0);
        /* Watch LSC interrupts between port probe and port start. */
-       priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
-                                                       eth_dev->data->port_id;
+       priv->sh->port[priv->dev_port - 1].nl_ih_port_id = eth_dev->data->port_id;
        mlx5_set_link_up(eth_dev);
        for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
                icfg[i].release_mem_en = !!sh->config.reclaim_mode;
                if (sh->config.reclaim_mode)
                        icfg[i].per_core_cache = 0;
 #ifdef HAVE_MLX5_HWS_SUPPORT
-               if (priv->sh->config.dv_flow_en == 2)
+               if (priv->sh->config.dv_flow_en == 2) {
                        icfg[i].size = sizeof(struct rte_flow_hw) + sizeof(struct rte_flow_nt2hws);
+                       icfg[i].size += sizeof(struct rte_flow_hw_aux);
+               }
 #endif
                priv->flows[i] = mlx5_ipool_create(&icfg[i]);
                if (!priv->flows[i])
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index e0f79932a5..51fa3eafda 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -13582,8 +13582,9 @@ static int flow_hw_prepare(struct rte_eth_dev *dev,
        (*flow)->nt2hws = (struct rte_flow_nt2hws *)
                                ((uintptr_t)(*flow) + sizeof(struct rte_flow_hw));
        (*flow)->idx = idx;
-       (*flow)->nt2hws->flow_aux = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct rte_flow_hw_aux),
-                                   RTE_CACHE_LINE_SIZE, rte_dev_numa_node(dev->device));
+       (*flow)->nt2hws->flow_aux = (struct rte_flow_hw_aux *)
+               ((uintptr_t)((*flow)->nt2hws) + sizeof(struct rte_flow_nt2hws));
+
        if (!(*flow)->nt2hws->flow_aux)
                return rte_flow_error_set(error, ENOMEM,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -14234,10 +14235,8 @@ flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow)
          * Notice matcher destroy will take place when matcher's list is destroyed
          * , same as for DV.
          */
-       if (flow->nt2hws->flow_aux) {
-               mlx5_free(flow->nt2hws->flow_aux);
+       if (flow->nt2hws->flow_aux)
                flow->nt2hws->flow_aux = NULL;
-       }
        if (flow->nt2hws->rix_encap_decap) {
                flow_encap_decap_resource_release(dev, flow->nt2hws->rix_encap_decap);
                flow->nt2hws->rix_encap_decap = 0;
-- 
2.34.1
