[dpdk-dev] [PATCH 0/2] add symmetric toeplitz hash support

2019-07-24 Thread simei
From: Simei Su 

[PATCH 1/2] ethdev: add new hash function "Symmetric Toeplitz" supported by 
hardware.
[PATCH 2/2] app/testpmd: add command line support for symmetric toeplitz hash 
configuration.

Simei Su (2):
  ethdev: add symmetric toeplitz hash support
  app/testpmd: add symmetric toeplitz hash support

 app/test-pmd/cmdline.c  | 12 +---
 app/test-pmd/cmdline_flow.c | 12 +++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  2 +-
 lib/librte_ethdev/rte_flow.h|  1 +
 4 files changed, 22 insertions(+), 5 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCH 1/2] ethdev: add symmetric toeplitz hash support

2019-07-24 Thread simei
From: Simei Su 

Currently, there are DEFAULT, TOEPLITZ and SIMPLE_XOR hash functions.
To support symmetric hashing via the rte_flow RSS action, this patch adds
a new hash function, "Symmetric Toeplitz", which is supported by some hardware.

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_flow.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index f3a8fb1..2a0e6d5 100644
--- a/lib/librte_ethdev/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -1744,6 +1744,7 @@ enum rte_eth_hash_function {
RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
+   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, /**< Symmetric Toeplitz */
RTE_ETH_HASH_FUNCTION_MAX,
 };
 
-- 
1.8.3.1
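
For context, a minimal sketch (not part of the patch) of how an application
could select the new hash function through the existing rte_flow RSS action.
The port id, queue list and pattern below are hypothetical, and whether a PMD
accepts the function is hardware dependent:

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical queues that RSS should spread matching traffic over. */
static const uint16_t rss_queues[] = { 0, 1 };

static struct rte_flow *
create_symmetric_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/*
	 * The new enumerator from this patch selects the symmetric variant,
	 * so flows with swapped src/dst hash to the same queue.
	 */
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
		.types = ETH_RSS_IPV4,
		.queue_num = RTE_DIM(rss_queues),
		.queue = rss_queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}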



[dpdk-dev] [PATCH 2/2] app/testpmd: add symmetric toeplitz hash support

2019-07-24 Thread simei
From: Simei Su 

This patch adds command line support for Symmetric Toeplitz
hash configuration.

Signed-off-by: Simei Su 
---
 app/test-pmd/cmdline.c  | 12 +---
 app/test-pmd/cmdline_flow.c | 12 +++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  2 +-
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index e7c4bee..29babe1 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1078,7 +1078,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"get_hash_global_config (port_id)\n"
"Get the global configurations of hash filters.\n\n"
 
-   "set_hash_global_config (port_id) 
(toeplitz|simple_xor|default)"
+   "set_hash_global_config (port_id) 
(toeplitz|simple_xor|symmetric_toeplitz|default)"
" 
(ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"

"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload)"
" (enable|disable)\n"
@@ -12131,6 +12131,9 @@ struct cmd_get_hash_global_config_result {
case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
printf("Hash function is Simple XOR\n");
break;
+   case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+   printf("Hash function is Symmetric Toeplitz\n");
+   break;
default:
printf("Unknown hash function\n");
break;
@@ -12204,6 +12207,9 @@ struct cmd_set_hash_global_config_result {
else if (!strcmp(res->hash_func, "simple_xor"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+   else if (!strcmp(res->hash_func, "symmetric_toeplitz"))
+   info.info.global_conf.hash_func =
+   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
else if (!strcmp(res->hash_func, "default"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_DEFAULT;
@@ -12233,7 +12239,7 @@ struct cmd_set_hash_global_config_result {
port_id, UINT16);
 cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
-   hash_func, "toeplitz#simple_xor#default");
+   hash_func, "toeplitz#simple_xor#symmetric_toeplitz#default");
 cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
flow_type,
@@ -12247,7 +12253,7 @@ struct cmd_set_hash_global_config_result {
.f = cmd_set_hash_global_config_parsed,
.data = NULL,
.help_str = "set_hash_global_config  "
-   "toeplitz|simple_xor|default "
+   "toeplitz|simple_xor|symmetric_toeplitz|default "
"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
"l2_payload enable|disable",
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 201bd9d..9f1dc46 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -205,6 +205,7 @@ enum index {
ACTION_RSS_FUNC_DEFAULT,
ACTION_RSS_FUNC_TOEPLITZ,
ACTION_RSS_FUNC_SIMPLE_XOR,
+   ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
ACTION_RSS_TYPES,
ACTION_RSS_TYPE,
ACTION_RSS_KEY,
@@ -2276,7 +2277,8 @@ static int comp_vc_action_rss_queue(struct context *, 
const struct token *,
.next = NEXT(action_rss,
 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
ACTION_RSS_FUNC_TOEPLITZ,
-   ACTION_RSS_FUNC_SIMPLE_XOR)),
+   ACTION_RSS_FUNC_SIMPLE_XOR,
+   ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
},
[ACTION_RSS_FUNC_DEFAULT] = {
.name = "default",
@@ -2293,6 +2295,11 @@ static int comp_vc_action_rss_queue(struct context *, 
const struct token *,
.help = "simple XOR hash function",
.call = parse_vc_action_rss_func,
},
+   [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
+   .name = "symmetric_toeplitz",
+   .help = "Symmetric Toeplitz hash function",
+   .call = parse_vc_action_rss_func,
+   },
[ACTION_RSS_LEVEL] = {
.name = "level",
.help = 

[dpdk-dev] [RFC] ethdev: extend RSS offload types

2019-07-25 Thread simei
From: Simei Su 

This RFC reserves several bits, at the bottom of the 64 bits, for
input set selection. The flow type is combined with the input set to
represent RSS types.

For example:
ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY: hash on the src IP address only
ETH_RSS_IPV4_UDP | ETH_RSS_L4_DST_ONLY: hash on src/dst IP and
the dst UDP port
ETH_RSS_L2_PAYLOAD | ETH_RSS_L2_DST_ONLY: hash on the dst MAC address

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_ethdev.h | 12 
 1 file changed, 12 insertions(+)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index dc6596b..452d29f 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -508,6 +508,18 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
 #define ETH_RSS_NVGRE  (1ULL << RTE_ETH_FLOW_NVGRE)
 
+/*
+ * The following six macros are used combined with ETH_RSS_* to
+ * represent rss types. The structure rte_flow_action_rss.types is
+ * 64-bit wide and we reserve couple bits here for input set selection.
+ */
+#define ETH_RSS_INSET_L2_SRC   0x0400
+#define ETH_RSS_INSET_L2_DST   0x0800
+#define ETH_RSS_INSET_L3_SRC   0x1000
+#define ETH_RSS_INSET_L3_DST   0x2000
+#define ETH_RSS_INSET_L4_SRC   0x4000
+#define ETH_RSS_INSET_L4_DST   0x8000
+
 #define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
ETH_RSS_FRAG_IPV4 | \
-- 
1.8.3.1



[dpdk-dev] [RFC,v2] ethdev: extend RSS offload types

2019-07-25 Thread simei
From: Simei Su 

This RFC reserves several bits, at the bottom of the 64 bits, for
input set selection. The flow type is combined with the input set to
represent RSS types.

Correct the input set mask to align with the definitions in rte_ethdev.h.
For example:
ETH_RSS_IPV4 | ETH_RSS_INSET_L3_SRC: hash on the src IP address only
ETH_RSS_IPV4_UDP | ETH_RSS_INSET_L4_DST: hash on src/dst IP and
the dst UDP port
ETH_RSS_L2_PAYLOAD | ETH_RSS_INSET_L2_DST: hash on the dst MAC address

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_ethdev.h | 12 
 1 file changed, 12 insertions(+)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index dc6596b..452d29f 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -508,6 +508,18 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
 #define ETH_RSS_NVGRE  (1ULL << RTE_ETH_FLOW_NVGRE)
 
+/*
+ * The following six macros are used combined with ETH_RSS_* to
+ * represent rss types. The structure rte_flow_action_rss.types is
+ * 64-bit wide and we reserve couple bits here for input set selection.
+ */
+#define ETH_RSS_INSET_L2_SRC   0x0400
+#define ETH_RSS_INSET_L2_DST   0x0800
+#define ETH_RSS_INSET_L3_SRC   0x1000
+#define ETH_RSS_INSET_L3_DST   0x2000
+#define ETH_RSS_INSET_L4_SRC   0x4000
+#define ETH_RSS_INSET_L4_DST   0x8000
+
 #define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
ETH_RSS_FRAG_IPV4 | \
-- 
1.8.3.1



[dpdk-dev] [PATCH v2 1/2] ethdev: add symmetric toeplitz hash support

2019-07-25 Thread simei
From: Simei Su 

Currently, there are DEFAULT, TOEPLITZ and SIMPLE_XOR hash functions.
To support symmetric hashing via the rte_flow RSS action, this patch adds
a new hash function, "Symmetric Toeplitz", which is supported by some hardware.

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_flow.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index b66bf14..77a0c52 100644
--- a/lib/librte_ethdev/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -1814,6 +1814,7 @@ enum rte_eth_hash_function {
RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
+   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, /**< Symmetric Toeplitz */
RTE_ETH_HASH_FUNCTION_MAX,
 };
 
-- 
1.8.3.1



[dpdk-dev] [PATCH v2 0/2] add symmetric toeplitz hash support

2019-07-25 Thread simei
From: Simei Su 

This v2 patch is rebased on 19.08-rc2.
[PATCH v2 1/2] ethdev: add new hash function "Symmetric Toeplitz" supported by 
hardware.
[PATCH v2 2/2] app/testpmd: add command line support for symmetric toeplitz 
hash configuration.

Simei Su (2):
  ethdev: add symmetric toeplitz hash support
  app/testpmd: add symmetric toeplitz hash support

 app/test-pmd/cmdline.c  | 12 +---
 app/test-pmd/cmdline_flow.c | 12 +++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  2 +-
 lib/librte_ethdev/rte_flow.h|  1 +
 4 files changed, 22 insertions(+), 5 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCH v2 2/2] app/testpmd: add symmetric toeplitz hash support

2019-07-25 Thread simei
From: Simei Su 

This patch adds command line support for Symmetric Toeplitz
hash configuration.

Signed-off-by: Simei Su 
---
 app/test-pmd/cmdline.c  | 12 +---
 app/test-pmd/cmdline_flow.c | 12 +++-
 doc/guides/testpmd_app_ug/testpmd_funcs.rst |  2 +-
 3 files changed, 21 insertions(+), 5 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a28362d..60c159f 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -1055,7 +1055,7 @@ static void cmd_help_long_parsed(void *parsed_result,
"get_hash_global_config (port_id)\n"
"Get the global configurations of hash filters.\n\n"
 
-   "set_hash_global_config (port_id) 
(toeplitz|simple_xor|default)"
+   "set_hash_global_config (port_id) 
(toeplitz|simple_xor|symmetric_toeplitz|default)"
" 
(ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|"

"ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload)"
" (enable|disable)\n"
@@ -12258,6 +12258,9 @@ struct cmd_get_hash_global_config_result {
case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
printf("Hash function is Simple XOR\n");
break;
+   case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+   printf("Hash function is Symmetric Toeplitz\n");
+   break;
default:
printf("Unknown hash function\n");
break;
@@ -12331,6 +12334,9 @@ struct cmd_set_hash_global_config_result {
else if (!strcmp(res->hash_func, "simple_xor"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+   else if (!strcmp(res->hash_func, "symmetric_toeplitz"))
+   info.info.global_conf.hash_func =
+   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
else if (!strcmp(res->hash_func, "default"))
info.info.global_conf.hash_func =
RTE_ETH_HASH_FUNCTION_DEFAULT;
@@ -12360,7 +12366,7 @@ struct cmd_set_hash_global_config_result {
port_id, UINT16);
 cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
-   hash_func, "toeplitz#simple_xor#default");
+   hash_func, "toeplitz#simple_xor#symmetric_toeplitz#default");
 cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type =
TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result,
flow_type,
@@ -12374,7 +12380,7 @@ struct cmd_set_hash_global_config_result {
.f = cmd_set_hash_global_config_parsed,
.data = NULL,
.help_str = "set_hash_global_config  "
-   "toeplitz|simple_xor|default "
+   "toeplitz|simple_xor|symmetric_toeplitz|default "
"ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|"
"ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|"
"l2_payload enable|disable",
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 4958713..4968df9 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -220,6 +220,7 @@ enum index {
ACTION_RSS_FUNC_DEFAULT,
ACTION_RSS_FUNC_TOEPLITZ,
ACTION_RSS_FUNC_SIMPLE_XOR,
+   ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
ACTION_RSS_TYPES,
ACTION_RSS_TYPE,
ACTION_RSS_KEY,
@@ -2457,7 +2458,8 @@ static int comp_vc_action_rss_queue(struct context *, 
const struct token *,
.next = NEXT(action_rss,
 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
ACTION_RSS_FUNC_TOEPLITZ,
-   ACTION_RSS_FUNC_SIMPLE_XOR)),
+   ACTION_RSS_FUNC_SIMPLE_XOR,
+   ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
},
[ACTION_RSS_FUNC_DEFAULT] = {
.name = "default",
@@ -2474,6 +2476,11 @@ static int comp_vc_action_rss_queue(struct context *, 
const struct token *,
.help = "simple XOR hash function",
.call = parse_vc_action_rss_func,
},
+   [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
+   .name = "symmetric_toeplitz",
+   .help = "Symmetric Toeplitz hash function",
+   .call = parse_vc_action_rss_func,
+   },
[ACTION_RSS_LEVEL] = {
.name = "level",
.help = 

[dpdk-dev] [RFC,v3] ethdev: extend RSS offload types

2019-07-28 Thread simei
From: Simei Su 

To make the macros easier to read, define their values as (1ULL << ###).

This RFC reserves several bits, at the bottom of the 64 bits, for
input set selection. The flow type is combined with the input set to
represent RSS types.

For example:
ETH_RSS_IPV4 | ETH_RSS_INSET_L3_SRC: hash on the src IP address only
ETH_RSS_IPV4_UDP | ETH_RSS_INSET_L4_DST: hash on src/dst IP and
the dst UDP port
ETH_RSS_L2_PAYLOAD | ETH_RSS_INSET_L2_DST: hash on the dst MAC address

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_ethdev.h | 13 +
 1 file changed, 13 insertions(+)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index dc6596b..8af6355 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -508,6 +508,19 @@ struct rte_eth_rss_conf {
 #define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
 #define ETH_RSS_NVGRE  (1ULL << RTE_ETH_FLOW_NVGRE)
 
+/*
+ * The following six macros are used combined with ETH_RSS_* to
+ * represent rss types. The structure rte_flow_action_rss.types is
+ * 64-bit wide and we reserve couple bits here for input set selection
+ * from bottom of the 64 bits.
+ */
+#define ETH_RSS_INSET_L2_SRC   (1ULL << 63)
+#define ETH_RSS_INSET_L2_DST   (1ULL << 62)
+#define ETH_RSS_INSET_L3_SRC   (1ULL << 61)
+#define ETH_RSS_INSET_L3_DST   (1ULL << 60)
+#define ETH_RSS_INSET_L4_SRC   (1ULL << 59)
+#define ETH_RSS_INSET_L4_DST   (1ULL << 58)
+
 #define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
ETH_RSS_FRAG_IPV4 | \
-- 
1.8.3.1



[dpdk-dev] [RFC,v4] ethdev: extend RSS offload types

2019-07-31 Thread simei
From: Simei Su 

This RFC covers two aspects:
   (1) decouple RTE_ETH_FLOW_* and ETH_RSS_*, because they serve
  different purposes.

   (2) reserve several bits, at the bottom of the 64 bits, for input set
  selection. These bits are combined with the existing ETH_RSS_* to
  represent RSS types.

   For example:
 ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY: hash on the src IP address only
 ETH_RSS_IPV4_UDP | ETH_RSS_L4_DST_ONLY: hash on src/dst IP and
 the dst UDP port
 ETH_RSS_L2_PAYLOAD | ETH_RSS_L2_DST_ONLY: hash on the dst MAC address

   The testpmd modification will be sent out in a later formal patch.

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_ethdev.h | 61 +-
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index dc6596b..9caf2bb 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -482,31 +482,42 @@ struct rte_eth_rss_conf {
 #define RTE_ETH_FLOW_MAX23
 
 /*
- * The RSS offload types are defined based on flow types.
- * Different NIC hardware may support different RSS offload
- * types. The supported flow types or RSS offload types can be queried by
- * rte_eth_dev_info_get().
- */
-#define ETH_RSS_IPV4   (1ULL << RTE_ETH_FLOW_IPV4)
-#define ETH_RSS_FRAG_IPV4  (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
-#define ETH_RSS_IPV6   (1ULL << RTE_ETH_FLOW_IPV6)
-#define ETH_RSS_FRAG_IPV6  (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
-#define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
-#define ETH_RSS_IPV6_EX(1ULL << RTE_ETH_FLOW_IPV6_EX)
-#define ETH_RSS_IPV6_TCP_EX(1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
-#define ETH_RSS_IPV6_UDP_EX(1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
-#define ETH_RSS_PORT   (1ULL << RTE_ETH_FLOW_PORT)
-#define ETH_RSS_VXLAN  (1ULL << RTE_ETH_FLOW_VXLAN)
-#define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
-#define ETH_RSS_NVGRE  (1ULL << RTE_ETH_FLOW_NVGRE)
+ * Here, we decouple RTE_ETH_FLOW_* and ETH_RSS_*. The following macros only
+ * make sense for RSS.
+ */
+#define ETH_RSS_IPV4   (1ULL << 2)
+#define ETH_RSS_FRAG_IPV4  (1ULL << 3)
+#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_IPV6   (1ULL << 8)
+#define ETH_RSS_FRAG_IPV6  (1ULL << 9)
+#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_L2_PAYLOAD (1ULL << 14)
+#define ETH_RSS_IPV6_EX(1ULL << 15)
+#define ETH_RSS_IPV6_TCP_EX(1ULL << 16)
+#define ETH_RSS_IPV6_UDP_EX(1ULL << 17)
+#define ETH_RSS_PORT   (1ULL << 18)
+#define ETH_RSS_VXLAN  (1ULL << 19)
+#define ETH_RSS_GENEVE (1ULL << 20)
+#define ETH_RSS_NVGRE  (1ULL << 21)
+
+/* The following six macros are used combined with ETH_RSS_* to
+ * represent rss types. The structure rte_flow_action_rss.types is
+ * 64-bit wide and we reserve couple bits here for input set selection
+ * from bottom of the 64 bits so that it doesn't impact future
+ * ETH_RSS_* definitions.
+ */
+#define ETH_RSS_L2_SRC_ONLY (1ULL << 63)
+#define ETH_RSS_L2_DST_ONLY (1ULL << 62)
+#define ETH_RSS_L3_SRC_ONLY (1ULL << 61)
+#define ETH_RSS_L3_DST_ONLY (1ULL << 60)
+#define ETH_RSS_L4_SRC_ONLY (1ULL << 59)
+#define ETH_RSS_L4_DST_ONLY (1ULL << 58)
 
 #define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
-- 
1.8.3.1
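
As an illustration only (based on this RFC, not on committed API), an
application could combine an existing RSS flow type with one of the proposed
*_ONLY bits through the regular rte_eth_dev_rss_hash_update() call. The port
id is hypothetical and PMD support for the new bits is driver specific:

#include <rte_ethdev.h>

/*
 * Request hashing of IPv4-UDP flows on the destination UDP port only
 * (plus src/dst IP, per the RFC example). ETH_RSS_L4_DST_ONLY is the
 * bit proposed by this RFC; whether a PMD honors it is driver specific.
 */
static int
request_dst_port_only_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL, /* NULL: keep the currently programmed key */
		.rss_hf = ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_L4_DST_ONLY,
	};

	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}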



[dpdk-dev] [PATCH 0/2] extend RSS offload types

2019-08-08 Thread simei
From: Simei Su 

[PATCH 1/2] ethdev: add several bits for extending rss offload types.
[PATCH 2/2] app/testpmd: add cmdline support for extending rss types.

Simei Su (2):
  ethdev: extend RSS offload types
  app/testpmd: add RSS offload types extending support

 app/test-pmd/config.c  |  6 
 lib/librte_ethdev/rte_ethdev.h | 62 --
 2 files changed, 42 insertions(+), 26 deletions(-)

-- 
1.8.3.1



[dpdk-dev] [PATCH 2/2] app/testpmd: add RSS offload types extending support

2019-08-08 Thread simei
From: Simei Su 

This patch adds command line support for configuring the extended RSS types.

Signed-off-by: Simei Su 
---
 app/test-pmd/config.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 1a5a5c1..b95bd43 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -103,6 +103,12 @@
{ "tcp", ETH_RSS_TCP },
{ "sctp", ETH_RSS_SCTP },
{ "tunnel", ETH_RSS_TUNNEL },
+   { "l2-src-only", ETH_RSS_L2_SRC_ONLY },
+   { "l2-dst-only", ETH_RSS_L2_DST_ONLY },
+   { "l3-src-only", ETH_RSS_L3_SRC_ONLY },
+   { "l3-dst-only", ETH_RSS_L3_DST_ONLY },
+   { "l4-src-only", ETH_RSS_L4_SRC_ONLY },
+   { "l4-dst-only", ETH_RSS_L4_DST_ONLY },
{ NULL, 0 },
 };
 
-- 
1.8.3.1



[dpdk-dev] [PATCH 1/2] ethdev: extend RSS offload types

2019-08-08 Thread simei
From: Simei Su 

This patch covers two aspects:
   (1) decouple RTE_ETH_FLOW_* and ETH_RSS_*, because they serve
  different purposes.

   (2) reserve several bits, at the bottom of the 64 bits, for input set
  selection. These bits are combined with the existing ETH_RSS_* to
  represent RSS types.

   For example:
  ETH_RSS_IPV4 | ETH_RSS_L3_SRC_ONLY: hash on the src IP address only
  ETH_RSS_IPV4_UDP | ETH_RSS_L4_DST_ONLY: hash on src/dst IP and
   the dst UDP port
  ETH_RSS_L2_PAYLOAD | ETH_RSS_L2_DST_ONLY: hash on the dst MAC address

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_ethdev.h | 62 --
 1 file changed, 36 insertions(+), 26 deletions(-)

diff --git a/lib/librte_ethdev/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index dc6596b..d47f7e9 100644
--- a/lib/librte_ethdev/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -481,32 +481,42 @@ struct rte_eth_rss_conf {
 #define RTE_ETH_FLOW_VXLAN_GPE  22 /**< VXLAN-GPE protocol based flow 
*/
 #define RTE_ETH_FLOW_MAX23
 
-/*
- * The RSS offload types are defined based on flow types.
- * Different NIC hardware may support different RSS offload
- * types. The supported flow types or RSS offload types can be queried by
- * rte_eth_dev_info_get().
- */
-#define ETH_RSS_IPV4   (1ULL << RTE_ETH_FLOW_IPV4)
-#define ETH_RSS_FRAG_IPV4  (1ULL << RTE_ETH_FLOW_FRAG_IPV4)
-#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
-#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)
-#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP)
-#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER)
-#define ETH_RSS_IPV6   (1ULL << RTE_ETH_FLOW_IPV6)
-#define ETH_RSS_FRAG_IPV6  (1ULL << RTE_ETH_FLOW_FRAG_IPV6)
-#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)
-#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
-#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP)
-#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER)
-#define ETH_RSS_L2_PAYLOAD (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)
-#define ETH_RSS_IPV6_EX(1ULL << RTE_ETH_FLOW_IPV6_EX)
-#define ETH_RSS_IPV6_TCP_EX(1ULL << RTE_ETH_FLOW_IPV6_TCP_EX)
-#define ETH_RSS_IPV6_UDP_EX(1ULL << RTE_ETH_FLOW_IPV6_UDP_EX)
-#define ETH_RSS_PORT   (1ULL << RTE_ETH_FLOW_PORT)
-#define ETH_RSS_VXLAN  (1ULL << RTE_ETH_FLOW_VXLAN)
-#define ETH_RSS_GENEVE (1ULL << RTE_ETH_FLOW_GENEVE)
-#define ETH_RSS_NVGRE  (1ULL << RTE_ETH_FLOW_NVGRE)
+/* Here, we decouple RTE_ETH_FLOW_* and ETH_RSS_*. The following macros only
+ * make sense for RSS.
+ */
+#define ETH_RSS_IPV4   (1ULL << 2)
+#define ETH_RSS_FRAG_IPV4  (1ULL << 3)
+#define ETH_RSS_NONFRAG_IPV4_TCP   (1ULL << 4)
+#define ETH_RSS_NONFRAG_IPV4_UDP   (1ULL << 5)
+#define ETH_RSS_NONFRAG_IPV4_SCTP  (1ULL << 6)
+#define ETH_RSS_NONFRAG_IPV4_OTHER (1ULL << 7)
+#define ETH_RSS_IPV6   (1ULL << 8)
+#define ETH_RSS_FRAG_IPV6  (1ULL << 9)
+#define ETH_RSS_NONFRAG_IPV6_TCP   (1ULL << 10)
+#define ETH_RSS_NONFRAG_IPV6_UDP   (1ULL << 11)
+#define ETH_RSS_NONFRAG_IPV6_SCTP  (1ULL << 12)
+#define ETH_RSS_NONFRAG_IPV6_OTHER (1ULL << 13)
+#define ETH_RSS_L2_PAYLOAD (1ULL << 14)
+#define ETH_RSS_IPV6_EX(1ULL << 15)
+#define ETH_RSS_IPV6_TCP_EX(1ULL << 16)
+#define ETH_RSS_IPV6_UDP_EX(1ULL << 17)
+#define ETH_RSS_PORT   (1ULL << 18)
+#define ETH_RSS_VXLAN  (1ULL << 19)
+#define ETH_RSS_GENEVE (1ULL << 20)
+#define ETH_RSS_NVGRE  (1ULL << 21)
+
+/* The following six macros are used combined with ETH_RSS_* to
+ * represent rss types. The structure rte_flow_action_rss.types is
+ * 64-bit wide and we reserve couple bits here for input set selection
+ * from bottom of the 64 bits so that it doesn't impact future
+ * ETH_RSS_* definitions.
+ */
+#define ETH_RSS_L2_SRC_ONLY (1ULL << 63)
+#define ETH_RSS_L2_DST_ONLY (1ULL << 62)
+#define ETH_RSS_L3_SRC_ONLY (1ULL << 61)
+#define ETH_RSS_L3_DST_ONLY (1ULL << 60)
+#define ETH_RSS_L4_SRC_ONLY (1ULL << 59)
+#define ETH_RSS_L4_DST_ONLY (1ULL << 58)
 
 #define ETH_RSS_IP ( \
ETH_RSS_IPV4 | \
-- 
1.8.3.1



[dpdk-dev] [PATCH] doc: announce ABI change for RSS hash function

2019-07-03 Thread simei
From: Simei Su 

Add a new value, SYMMETRIC_TOEPLITZ, to rte_eth_hash_function. This
enables symmetric hashing via the rte_flow RSS action.

Signed-off-by: Simei Su 
---
 doc/guides/rel_notes/deprecation.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/guides/rel_notes/deprecation.rst 
b/doc/guides/rel_notes/deprecation.rst
index e2721fa..540285b 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -99,3 +99,5 @@ Deprecation Notices
   to set new power environment if power environment was already initialized.
   In this case the function will return -1 unless the environment is unset 
first
   (using ``rte_power_unset_env``). Other function usage scenarios will not 
change.
+
+* ethdev: New member in ``rte_eth_hash_function`` to support symmetric hash 
function.
-- 
1.8.3.1



[dpdk-dev] [RFC] ethdev: support symmetric hash function

2019-07-03 Thread simei
From: Simei Su 

Currently, there are DEFAULT, TOEPLITZ and SIMPLE_XOR hash functions.
To support symmetric hashing via the rte_flow RSS action, this RFC introduces
SYMMETRIC_TOEPLITZ to rte_eth_hash_function.

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_flow.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index f3a8fb1..e3c4fe5 100644
--- a/lib/librte_ethdev/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -1744,6 +1744,7 @@ enum rte_eth_hash_function {
RTE_ETH_HASH_FUNCTION_DEFAULT = 0,
RTE_ETH_HASH_FUNCTION_TOEPLITZ, /**< Toeplitz */
RTE_ETH_HASH_FUNCTION_SIMPLE_XOR, /**< Simple XOR */
+   RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ, /**< Symmetric TOEPLITZ */
RTE_ETH_HASH_FUNCTION_MAX,
 };
 
-- 
1.8.3.1



[dpdk-dev] [PATCH] doc: announce ABI change for rte flow RSS action

2019-07-03 Thread simei
From: Simei Su 

Add a new member, inputset, to rte_flow_action_rss. This
enables input set configuration via the rte_flow RSS action.

Signed-off-by: Simei Su 
---
 doc/guides/rel_notes/deprecation.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/guides/rel_notes/deprecation.rst 
b/doc/guides/rel_notes/deprecation.rst
index e2721fa..5cd360c 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -99,3 +99,6 @@ Deprecation Notices
   to set new power environment if power environment was already initialized.
   In this case the function will return -1 unless the environment is unset 
first
   (using ``rte_power_unset_env``). Other function usage scenarios will not 
change.
+
+* ethdev: New member in ``rte_flow_action_rss`` to support input set change
+  by rte_flow RSS action. It ignores spec and focuses on mask only.
-- 
1.8.3.1



[dpdk-dev] [RFC] ethdev: support input set change by RSS action

2019-07-03 Thread simei
From: Simei Su 

This RFC introduces an inputset member in rte_flow_action_rss to
support input-set-specific configuration by the rte_flow RSS action.

A testpmd command line example makes this clearer.

For example, the flow below selects the L4 ports as the input set for any
eth/ipv4/tcp packet: #flow create 0 ingress pattern eth / ipv4 / tcp /
end actions rss inputset tcp src mask 0x dst mask 0x /end

Signed-off-by: Simei Su 
---
 lib/librte_ethdev/rte_flow.h | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index f3a8fb1..2a455b6 100644
--- a/lib/librte_ethdev/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -1796,6 +1796,9 @@ struct rte_flow_action_rss {
uint32_t queue_num; /**< Number of entries in @p queue. */
const uint8_t *key; /**< Hash key. */
const uint16_t *queue; /**< Queue indices to use. */
+   struct rte_flow_item *inputset; /** Provide more specific inputset 
configuration.
+* ignore spec, only mask.
+*/
 };
 
 /**
-- 
1.8.3.1
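
A rough sketch of how an application might fill the proposed member,
mirroring the testpmd example above. The inputset field is only an RFC
proposal here, and the mask-only semantics (spec ignored) follow this RFC
rather than committed API:

#include <rte_byteorder.h>
#include <rte_flow.h>

/*
 * Mask-only item: select the TCP source and destination ports as the
 * RSS input set; the spec is ignored per this RFC.
 */
static const struct rte_flow_item_tcp tcp_port_mask = {
	.hdr = {
		.src_port = RTE_BE16(0xffff),
		.dst_port = RTE_BE16(0xffff),
	},
};

static struct rte_flow_item rss_inputset[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = NULL,
		.mask = &tcp_port_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

/* The inputset member below exists only in this RFC's proposal. */
static struct rte_flow_action_rss rss = {
	.inputset = rss_inputset,
};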



[dpdk-dev] [DPDK] net/ice: fix Rx statistics

2019-04-24 Thread simei
The Rx stats increase even when no packets are sent. This patch fixes the
issue by calculating the ipackets and ibytes statistics based on the VSI
instead of the port, to avoid the statistics error.

Fixes: a37bde56314d ("net/ice: support statistics")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
 drivers/net/ice/ice_ethdev.c | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 0946b19..1c851ac 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -3305,15 +3305,14 @@ static int ice_rx_queue_intr_disable(struct rte_eth_dev 
*dev,
/* call read registers - updates values, now write them to struct */
ice_read_stats_registers(pf, hw);
 
-   stats->ipackets = ns->eth.rx_unicast +
- ns->eth.rx_multicast +
- ns->eth.rx_broadcast -
- ns->eth.rx_discards -
+   stats->ipackets = pf->main_vsi->eth_stats.rx_unicast +
+ pf->main_vsi->eth_stats.rx_multicast +
+ pf->main_vsi->eth_stats.rx_broadcast -
  pf->main_vsi->eth_stats.rx_discards;
stats->opackets = ns->eth.tx_unicast +
  ns->eth.tx_multicast +
  ns->eth.tx_broadcast;
-   stats->ibytes   = ns->eth.rx_bytes;
+   stats->ibytes   = pf->main_vsi->eth_stats.rx_bytes;
stats->obytes   = ns->eth.tx_bytes;
stats->oerrors  = ns->eth.tx_errors +
  pf->main_vsi->eth_stats.tx_errors;
-- 
1.8.3.1



[PATCH] net/idpf: fix incorrect status calculation

2023-10-16 Thread Simei Su
Fix the incorrect ingress and egress packet number calculation.

Fixes: 7514d76d407b ("net/idpf: add basic statistics")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
 drivers/net/idpf/idpf_ethdev.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 3af7cf0..d716e57 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -281,9 +281,11 @@ idpf_dev_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats)
 
idpf_vport_stats_update(&vport->eth_stats_offset, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
-   pstats->rx_broadcast - pstats->rx_discards;
+   pstats->rx_broadcast + pstats->rx_discards +
+   pstats->rx_errors + pstats->rx_unknown_protocol;
stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
-   pstats->tx_unicast;
+   pstats->tx_unicast + pstats->tx_discards +
+   pstats->tx_errors;
stats->ierrors = pstats->rx_errors;
stats->imissed = pstats->rx_discards;
stats->oerrors = pstats->tx_errors + pstats->tx_discards;
-- 
2.9.5



[PATCH] net/cpfl: fix incorrect status calculation

2023-10-16 Thread Simei Su
Fix the incorrect ingress and egress packet number calculation.

Fixes: e3289d8fb63f ("net/cpfl: support basic statistics")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
 drivers/net/cpfl/cpfl_ethdev.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 189072a..75d2d22 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -313,9 +313,11 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats)
 
idpf_vport_stats_update(&vport->eth_stats_offset, pstats);
stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
-   pstats->rx_broadcast - pstats->rx_discards;
+   pstats->rx_broadcast + pstats->rx_discards +
+   pstats->rx_errors + pstats->rx_unknown_protocol;
stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
-   pstats->tx_unicast;
+   pstats->tx_unicast + pstats->tx_discards +
+   pstats->tx_errors;
stats->imissed = pstats->rx_discards;
stats->ierrors = pstats->rx_errors;
stats->oerrors = pstats->tx_errors + pstats->tx_discards;
-- 
2.9.5



[PATCH] net/i40e: rework maximum frame size configuration

2023-01-16 Thread Simei Su
This patch removes the unnecessary link status check and sets the maximum
frame size to the hardware directly at device start.

Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port level")
Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port level")
Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
 drivers/net/i40e/i40e_ethdev.c | 47 +-
 1 file changed, 10 insertions(+), 37 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d..e21e4d9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev 
*dev,
  struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
@@ -2467,8 +2466,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
"please call hierarchy_commit() "
"before starting the port");
 
-   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-   i40e_set_mac_max_frame(dev, max_frame_size);
+   max_frame_size = dev->data->mtu ?
+   dev->data->mtu + I40E_ETH_OVERHEAD :
+   I40E_FRAME_SIZE_MAX;
+
+   /* Set the max frame size to HW*/
+   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to set mac config");
+   return ret;
+   }
 
return I40E_SUCCESS;
 
@@ -12123,40 +12130,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-   uint32_t rep_cnt = MAX_REPEAT_TIME;
-   struct rte_eth_link link;
-   enum i40e_status_code status;
-   bool can_be_set = true;
-
-   /*
-* I40E_MEDIA_TYPE_BASET link up can be ignored
-* I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-* is I40E_MEDIA_TYPE_UNKNOWN
-*/
-   if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-   hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-   do {
-   update_link_reg(hw, &link);
-   if (link.link_status)
-   break;
-   rte_delay_ms(CHECK_INTERVAL);
-   } while (--rep_cnt);
-   can_be_set = !!link.link_status;
-   }
-
-   if (can_be_set) {
-   status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-   if (status != I40E_SUCCESS)
-   PMD_DRV_LOG(ERR, "Failed to set max frame size at port 
level");
-   } else {
-   PMD_DRV_LOG(ERR, "Set max frame size at port level not 
applicable on link down");
-   }
-}
-
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-- 
2.9.5



RE: [PATCH v2] net/i40e: don't check link status on device start

2023-01-16 Thread Su, Simei
Hi David,

> -Original Message-
> From: David Marchand 
> Sent: Friday, January 13, 2023 9:53 PM
> To: Zhang, Helin 
> Cc: Zhang, Yuying ; Xing, Beilei
> ; Mcnamara, John ;
> dev@dpdk.org; sta...@dpdk.org; Zhang, Qi Z ; Dapeng
> Yu ; Wenxuan Wu 
> Subject: Re: [PATCH v2] net/i40e: don't check link status on device start
> 
> On Fri, Jan 13, 2023 at 2:51 PM Zhang, Helin  wrote:
> >
> >
> >
> > > -Original Message-
> > > From: David Marchand 
> > > Sent: Friday, January 13, 2023 9:47 PM
> > > To: Zhang, Helin 
> > > Cc: Zhang, Yuying ; Xing, Beilei
> > > ; Mcnamara, John ;
> > > dev@dpdk.org; sta...@dpdk.org; Zhang, Qi Z ;
> > > Dapeng Yu ; Wenxuan Wu
> 
> > > Subject: Re: [PATCH v2] net/i40e: don't check link status on device
> > > start
> > >
> > > On Fri, Jan 13, 2023 at 2:39 PM Zhang, Helin 
> wrote:
> > > >
> > > >
> > > >
> > > > > -Original Message-
> > > > > From: David Marchand 
> > > > > Sent: Friday, January 13, 2023 9:33 PM
> > > > > To: Zhang, Yuying ; Xing, Beilei
> > > > > ; Mcnamara, John
> > > > > 
> > > > > Cc: dev@dpdk.org; sta...@dpdk.org; Zhang, Qi Z
> > > > > ; Dapeng Yu ;
> > > > > Wenxuan
> > > Wu
> > > > > 
> > > > > Subject: Re: [PATCH v2] net/i40e: don't check link status on
> > > > > device start
> > > > >
> > > > > Hello i40e maintainers, John,
> > > > >
> > > > > On Mon, Jan 9, 2023 at 10:21 AM David Marchand
> > > > >  wrote:
> > > > > > On Tue, Jan 3, 2023 at 3:02 PM David Marchand
> > > > >  wrote:
> > > > > > > Hi i40e maintainers,
> > > > > > >
> > > > > > > On Tue, Dec 13, 2022 at 10:19 AM David Marchand
> > > > > > >  wrote:
> > > > > > > >
> > > > > > > > The mentioned changes broke existing applications when the
> > > > > > > > link status of i40e ports is down at the time the port is 
> > > > > > > > started.
> > > > > > > > Revert those changes, the original issue will need a different 
> > > > > > > > fix.
> > > > Hi David
> > > >
> > > > Does it break all the application or just a specific application?
> > >
> > > I don't see how it would not affect all applications seeing how the
> > > original patch is dumb.
> > >
> > > > We may need to understand the issue you met, and try to fix it later.
> > >
> > > Just unplug the cable or fake a link down on your i40e port, start
> > > your application or port, then plug the cable back.
> > > The max frame size will never get applied to hw.
> > Got it, I will forward to a right expert to check. Thank you very much for
> reaching out to us!
> 
> I hope I get a reply _soon_.
> Or I will just apply those reverts.

If those reverts are applied, some issues still exist on our side. I sent one 
patch to patchwork:
https://patchwork.dpdk.org/project/dpdk/patch/20230116105318.19412-1-simei...@intel.com/.
You can try this patch to see whether it solves the issue on your side.
At the same time, on our side, we need to run further regression tests to check 
whether this patch affects other
cases; the regression takes some time.

Thanks,
Simei

> 
> 
> Thanks.
> 
> --
> David Marchand



RE: [PATCH] net/i40e: rework maximum frame size configuration

2023-01-16 Thread Su, Simei
Hi David,

> -Original Message-
> From: David Marchand 
> Sent: Monday, January 16, 2023 7:19 PM
> To: Su, Simei 
> Cc: Xing, Beilei ; Zhang, Yuying
> ; dev@dpdk.org; Zhang, Qi Z
> ; Yang, Qiming ;
> sta...@dpdk.org; Zhang, Helin 
> Subject: Re: [PATCH] net/i40e: rework maximum frame size configuration
> 
> On Mon, Jan 16, 2023 at 11:54 AM Simei Su  wrote:
> >
> > This patch removes unnecessary link status check.
> >
> > Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port
> > level")
> > Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port
> > level")
> > Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Simei Su 
> 
> Thanks for looking into the issue.
> 
> This is rather close to what I had tried [1] along my original report, but it 
> failed
> in the CI.
> Let's see how the validation of your patch goes.
> 
> 1:
> https://patchwork.dpdk.org/project/dpdk/patch/20221212143715.29649-1-d
> avid.march...@redhat.com/
> 

OK. We will find an environment to see why the unit test failed.

Thanks,
Simei

> 
> --
> David Marchand



RE: [PATCH v3 3/7] net/e1000: fix whitespace

2023-01-16 Thread Su, Simei


> -Original Message-
> From: Stephen Hemminger 
> Sent: Tuesday, January 17, 2023 8:15 AM
> To: dev@dpdk.org
> Cc: Stephen Hemminger ; Su, Simei
> ; Wu, Wenjun1 ; Burakov,
> Anatoly 
> Subject: [PATCH v3 3/7] net/e1000: fix whitespace
> 
> The style standard is to use blank after keywords.
> I.e "if (" not "if("
> 
> Signed-off-by: Stephen Hemminger 
> ---
>  drivers/net/e1000/em_ethdev.c  |  2 +-
>  drivers/net/e1000/igb_ethdev.c | 16 
>  drivers/net/e1000/igb_pf.c |  2 +-
>  drivers/net/e1000/igb_rxtx.c   |  6 +++---
>  4 files changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
> index 8ee9be12ad19..3cb09538b2df 100644
> --- a/drivers/net/e1000/em_ethdev.c
> +++ b/drivers/net/e1000/em_ethdev.c
> @@ -872,7 +872,7 @@ eth_em_stats_get(struct rte_eth_dev *dev, struct
> rte_eth_stats *rte_stats)
>   E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
>   int pause_frames;
> 
> - if(hw->phy.media_type == e1000_media_type_copper ||
> + if (hw->phy.media_type == e1000_media_type_copper ||
>   (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
>   stats->symerrs += E1000_READ_REG(hw,E1000_SYMERRS);
>   stats->sec += E1000_READ_REG(hw, E1000_SEC); diff --git
> a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index
> 8858f975f8cc..e89bfa248fb0 100644
> --- a/drivers/net/e1000/igb_ethdev.c
> +++ b/drivers/net/e1000/igb_ethdev.c
> @@ -735,7 +735,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
>   /* for secondary processes, we don't initialise any further as primary
>* has already done this work. Only check we don't need a different
>* RX function */
> - if (rte_eal_process_type() != RTE_PROC_PRIMARY){
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
>   if (eth_dev->data->scattered_rx)
>   eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts;
>   return 0;
> @@ -927,7 +927,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
>   /* for secondary processes, we don't initialise any further as primary
>* has already done this work. Only check we don't need a different
>* RX function */
> - if (rte_eal_process_type() != RTE_PROC_PRIMARY){
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
>   if (eth_dev->data->scattered_rx)
>   eth_dev->rx_pkt_burst = ð_igb_recv_scattered_pkts;
>   return 0;
> @@ -1683,7 +1683,7 @@ igb_read_stats_registers(struct e1000_hw *hw,
> struct e1000_hw_stats *stats)
>   uint64_t old_rpthc = stats->rpthc;
>   uint64_t old_hgptc = stats->hgptc;
> 
> - if(hw->phy.media_type == e1000_media_type_copper ||
> + if (hw->phy.media_type == e1000_media_type_copper ||
>   (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
>   stats->symerrs +=
>   E1000_READ_REG(hw,E1000_SYMERRS); @@ -3498,12
> +3498,12 @@ static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
>   E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
>   int i = 0, j = 0, vfta = 0, mask = 1;
> 
> - for (i = 0; i < IGB_VFTA_SIZE; i++){
> + for (i = 0; i < IGB_VFTA_SIZE; i++) {
>   vfta = shadow_vfta->vfta[i];
> - if(vfta){
> + if (vfta) {
>   mask = 1;
> - for (j = 0; j < 32; j++){
> - if(vfta & mask)
> + for (j = 0; j < 32; j++) {
> + if (vfta & mask)
>   igbvf_set_vfta(hw,
>   (uint16_t)((i<<5)+j), on);
>   mask<<=1;
> @@ -3528,7 +3528,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev,
> uint16_t vlan_id, int on)
> 
>   /*vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf*/
>   ret = igbvf_set_vfta(hw, vlan_id, !!on);
> - if(ret){
> + if (ret) {
>   PMD_INIT_LOG(ERR, "Unable to set VF vlan");
>   return ret;
>   }
> diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c index
> c7588ea57eaa..b1f74eb841d2 100644
> --- a/drivers/net/e1000/igb_pf.c
> +++ b/drivers/net/e1000/igb_pf.c
> @@ -78,7 +78,7 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
> 
>   if (hw->mac.type == e1000_i350)
>   nb_queue 

RE: [PATCH 1/3] net/igc: code refactoring

2023-01-16 Thread Su, Simei
Hi Qi,

> -Original Message-
> From: Zhang, Qi Z 
> Sent: Tuesday, January 17, 2023 10:25 AM
> To: Su, Simei ; Guo, Junfeng 
> Cc: dev@dpdk.org; Wu, Wenjun1 
> Subject: RE: [PATCH 1/3] net/igc: code refactoring
> 
> 
> 
> > -Original Message-
> > From: Su, Simei 
> > Sent: Tuesday, December 20, 2022 11:41 AM
> > To: Zhang, Qi Z ; Guo, Junfeng
> > 
> > Cc: dev@dpdk.org; Wu, Wenjun1 ; Su, Simei
> > 
> > Subject: [PATCH 1/3] net/igc: code refactoring
> >
> > Move related structures for Rx/Tx queue from igc_txrx.c to igc_txrx.h
> > to make code cleaner and variables used more conveniently.
> 
> Not sure if this is necessary.
> If a structure only be used internally, keep it internally should be OK.
> Otherwise need to give a reason why to expose it.

OK. I will rework the commit log to give a detailed reason in v2.

Thanks,
Simei

> 
> >
> > Signed-off-by: Simei Su 
> > ---
> >  drivers/net/igc/igc_txrx.c | 118
> > -
> >  drivers/net/igc/igc_txrx.h | 115
> > +++
> >  2 files changed, 115 insertions(+), 118 deletions(-)
> >
> > diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
> > index
> > ffd219b..c462e91 100644
> > --- a/drivers/net/igc/igc_txrx.c
> > +++ b/drivers/net/igc/igc_txrx.c
> > @@ -93,124 +93,6 @@
> >
> >  #define IGC_TX_OFFLOAD_NOTSUP_MASK
> > (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
> >
> > -/**
> > - * Structure associated with each descriptor of the RX ring of a RX queue.
> > - */
> > -struct igc_rx_entry {
> > -   struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
> > -};
> > -
> > -/**
> > - * Structure associated with each RX queue.
> > - */
> > -struct igc_rx_queue {
> > -   struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring.
> > */
> > -   volatile union igc_adv_rx_desc *rx_ring;
> > -   /**< RX ring virtual address. */
> > -   uint64_trx_ring_phys_addr; /**< RX ring DMA address. */
> > -   volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
> > -   volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
> > -   struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
> > -   struct rte_mbuf *pkt_first_seg; /**< First segment of current packet.
> > */
> > -   struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet.
> > */
> > -   uint16_tnb_rx_desc; /**< number of RX descriptors. */
> > -   uint16_trx_tail;/**< current value of RDT register. */
> > -   uint16_tnb_rx_hold; /**< number of held free RX desc. */
> > -   uint16_trx_free_thresh; /**< max free RX desc to hold. */
> > -   uint16_tqueue_id;   /**< RX queue index. */
> > -   uint16_treg_idx;/**< RX queue register index. */
> > -   uint16_tport_id;/**< Device port identifier. */
> > -   uint8_t pthresh;/**< Prefetch threshold register. */
> > -   uint8_t hthresh;/**< Host threshold register. */
> > -   uint8_t wthresh;/**< Write-back threshold register. */
> > -   uint8_t crc_len;/**< 0 if CRC stripped, 4 otherwise. */
> > -   uint8_t drop_en;/**< If not 0, set SRRCTL.Drop_En. */
> > -   uint32_tflags;  /**< RX flags. */
> > -   uint64_toffloads;   /**< offloads of
> > RTE_ETH_RX_OFFLOAD_* */
> > -};
> > -
> > -/** Offload features */
> > -union igc_tx_offload {
> > -   uint64_t data;
> > -   struct {
> > -   uint64_t l3_len:9; /**< L3 (IP) Header Length. */
> > -   uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
> > -   uint64_t vlan_tci:16;
> > -   /**< VLAN Tag Control Identifier(CPU order). */
> > -   uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
> > -   uint64_t tso_segsz:16; /**< TCP TSO segment size. */
> > -   /* uint64_t unused:8; */
> > -   };
> > -};
> > -
> > -/*
> > - * Compare mask for igc_tx_offload.data,
> > - * should be in sync with igc_tx_offload layout.
> > - */
> > -#define TX_MACIP_LEN_CMP_MASK  0xULL /**< L2L3
> > header mask. */
> > -#define TX_VLAN_CMP_MASK   0xULL /**< Vlan
> > mask. */
> > -#define TX_

[PATCH v2 0/3] net/igc: support PTP timesync

2023-01-17 Thread Simei Su
[PATCH v2 1/3] code refactoring.
[PATCH v2 2/3] add related definitions for ptp timesync.
[PATCH v2 3/3] add IEEE1588 API to support timesync.

v2:
* Refine commit log.
* Update the doc/guides/nics/features/igc.ini to add "Timesync" feature.
* Add release notes.

Simei Su (3):
  net/igc: code refactoring
  net/igc/base: support PTP timesync
  net/igc: support IEEE 1588 PTP

 doc/guides/nics/features/igc.ini   |   1 +
 doc/guides/rel_notes/release_23_03.rst |   3 +
 drivers/net/igc/base/igc_defines.h |  11 ++
 drivers/net/igc/igc_ethdev.c   | 222 +
 drivers/net/igc/igc_ethdev.h   |   4 +-
 drivers/net/igc/igc_txrx.c | 166 +++-
 drivers/net/igc/igc_txrx.h | 116 +
 7 files changed, 401 insertions(+), 122 deletions(-)

-- 
2.9.5



[PATCH v2 1/3] net/igc: code refactoring

2023-01-17 Thread Simei Su
This patch moves some structures from igc_txrx.c to igc_txrx.h for the
timesync enabling feature. For example, fields of the "igc_rx_queue"
structure can then be used in both igc_ethdev.c and igc_txrx.c more
conveniently. This is also consistent with other PMD coding styles.

Signed-off-by: Simei Su 
---
 drivers/net/igc/igc_txrx.c | 118 -
 drivers/net/igc/igc_txrx.h | 115 +++
 2 files changed, 115 insertions(+), 118 deletions(-)

diff --git a/drivers/net/igc/igc_txrx.c b/drivers/net/igc/igc_txrx.c
index ffd219b..c462e91 100644
--- a/drivers/net/igc/igc_txrx.c
+++ b/drivers/net/igc/igc_txrx.c
@@ -93,124 +93,6 @@
 
 #define IGC_TX_OFFLOAD_NOTSUP_MASK (RTE_MBUF_F_TX_OFFLOAD_MASK ^ 
IGC_TX_OFFLOAD_MASK)
 
-/**
- * Structure associated with each descriptor of the RX ring of a RX queue.
- */
-struct igc_rx_entry {
-   struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
-};
-
-/**
- * Structure associated with each RX queue.
- */
-struct igc_rx_queue {
-   struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
-   volatile union igc_adv_rx_desc *rx_ring;
-   /**< RX ring virtual address. */
-   uint64_trx_ring_phys_addr; /**< RX ring DMA address. */
-   volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
-   volatile uint32_t   *rdh_reg_addr; /**< RDH register address. */
-   struct igc_rx_entry *sw_ring;   /**< address of RX software ring. */
-   struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
-   struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
-   uint16_tnb_rx_desc; /**< number of RX descriptors. */
-   uint16_trx_tail;/**< current value of RDT register. */
-   uint16_tnb_rx_hold; /**< number of held free RX desc. */
-   uint16_trx_free_thresh; /**< max free RX desc to hold. */
-   uint16_tqueue_id;   /**< RX queue index. */
-   uint16_treg_idx;/**< RX queue register index. */
-   uint16_tport_id;/**< Device port identifier. */
-   uint8_t pthresh;/**< Prefetch threshold register. */
-   uint8_t hthresh;/**< Host threshold register. */
-   uint8_t wthresh;/**< Write-back threshold register. */
-   uint8_t crc_len;/**< 0 if CRC stripped, 4 otherwise. */
-   uint8_t drop_en;/**< If not 0, set SRRCTL.Drop_En. */
-   uint32_tflags;  /**< RX flags. */
-   uint64_toffloads;   /**< offloads of RTE_ETH_RX_OFFLOAD_* */
-};
-
-/** Offload features */
-union igc_tx_offload {
-   uint64_t data;
-   struct {
-   uint64_t l3_len:9; /**< L3 (IP) Header Length. */
-   uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
-   uint64_t vlan_tci:16;
-   /**< VLAN Tag Control Identifier(CPU order). */
-   uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
-   uint64_t tso_segsz:16; /**< TCP TSO segment size. */
-   /* uint64_t unused:8; */
-   };
-};
-
-/*
- * Compare mask for igc_tx_offload.data,
- * should be in sync with igc_tx_offload layout.
- */
-#define TX_MACIP_LEN_CMP_MASK  0xULL /**< L2L3 header mask. */
-#define TX_VLAN_CMP_MASK   0xULL /**< Vlan mask. */
-#define TX_TCP_LEN_CMP_MASK0x00FFULL /**< TCP header mask. */
-#define TX_TSO_MSS_CMP_MASK0x0000ULL /**< TSO segsz mask. */
-/** Mac + IP + TCP + Mss mask. */
-#define TX_TSO_CMP_MASK\
-   (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
-
-/**
- * Structure to check if new context need be built
- */
-struct igc_advctx_info {
-   uint64_t flags;   /**< ol_flags related to context build. */
-   /** tx offload: vlan, tso, l2-l3-l4 lengths. */
-   union igc_tx_offload tx_offload;
-   /** compare mask for tx offload. */
-   union igc_tx_offload tx_offload_mask;
-};
-
-/**
- * Hardware context number
- */
-enum {
-   IGC_CTX_0= 0, /**< CTX0*/
-   IGC_CTX_1= 1, /**< CTX1*/
-   IGC_CTX_NUM  = 2, /**< CTX_NUM */
-};
-
-/**
- * Structure associated with each descriptor of the TX ring of a TX queue.
- */
-struct igc_tx_entry {
-   struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
-   uint16_t next_id; /**< Index of next descriptor in ring. */
-   uint16_t last_id; /**< Index of last scattered descriptor. */
-};
-
-/**
- * Structure associated with each TX queue.
- */
-struct igc_tx_queue {
-   volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
-   uint64_t 

[PATCH v2 2/3] net/igc/base: support PTP timesync

2023-01-17 Thread Simei Su
Add definitions for timesync enabling.

Signed-off-by: Simei Su 
---
 drivers/net/igc/base/igc_defines.h | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/drivers/net/igc/base/igc_defines.h 
b/drivers/net/igc/base/igc_defines.h
index 61964bc..dd7330a 100644
--- a/drivers/net/igc/base/igc_defines.h
+++ b/drivers/net/igc/base/igc_defines.h
@@ -795,6 +795,17 @@
 
 #define TSYNC_INTERRUPTS   TSINTR_TXTS
 
+/* Split Replication Receive Control */
+#define IGC_SRRCTL_TIMESTAMP   0x4000
+#define IGC_SRRCTL_TIMER1SEL(timer)(((timer) & 0x3) << 14)
+#define IGC_SRRCTL_TIMER0SEL(timer)(((timer) & 0x3) << 17)
+
+/* Sample RX tstamp in PHY sop */
+#define IGC_TSYNCRXCTL_RXSYNSIG 0x0400
+
+/* Sample TX tstamp in PHY sop */
+#define IGC_TSYNCTXCTL_TXSYNSIG 0x0020
+
 /* TSAUXC Configuration Bits */
 #define TSAUXC_EN_TT0  (1 << 0)  /* Enable target time 0. */
 #define TSAUXC_EN_TT1  (1 << 1)  /* Enable target time 1. */
-- 
2.9.5



[PATCH v2 3/3] net/igc: support IEEE 1588 PTP

2023-01-17 Thread Simei Su
Add igc support for the ethdev APIs to enable/disable and read/write/adjust
IEEE 1588 PTP timestamps.

The example command for running ptpclient is as below:
./build/examples/dpdk-ptpclient -c 1 -n 3 -- -T 0 -p 0x1

Signed-off-by: Simei Su 
---
 doc/guides/nics/features/igc.ini   |   1 +
 doc/guides/rel_notes/release_23_03.rst |   3 +
 drivers/net/igc/igc_ethdev.c   | 222 +
 drivers/net/igc/igc_ethdev.h   |   4 +-
 drivers/net/igc/igc_txrx.c |  50 +++-
 drivers/net/igc/igc_txrx.h |   1 +
 6 files changed, 276 insertions(+), 5 deletions(-)

diff --git a/doc/guides/nics/features/igc.ini b/doc/guides/nics/features/igc.ini
index b5deea3..25a997c 100644
--- a/doc/guides/nics/features/igc.ini
+++ b/doc/guides/nics/features/igc.ini
@@ -33,6 +33,7 @@ VLAN filter  = Y
 VLAN offload = Y
 Linux= Y
 x86-64   = Y
+Timesync = Y
 
 [rte_flow items]
 eth  = P
diff --git a/doc/guides/rel_notes/release_23_03.rst 
b/doc/guides/rel_notes/release_23_03.rst
index b8c5b68..6e086a2 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -55,6 +55,9 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+* **Updated Intel igc driver.**
+
+  * Added timesync API support.
 
 Removed Items
 -
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index dcd262f..ef3346b 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -78,6 +78,16 @@
 #define IGC_ALARM_INTERVAL 800u
 /* us, about 13.6s some per-queue registers will wrap around back to 0. */
 
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGC_I225_TX_LATENCY_10 240
+#define IGC_I225_TX_LATENCY_10058
+#define IGC_I225_TX_LATENCY_1000   80
+#define IGC_I225_TX_LATENCY_2500   1325
+#define IGC_I225_RX_LATENCY_10 6450
+#define IGC_I225_RX_LATENCY_100185
+#define IGC_I225_RX_LATENCY_1000   300
+#define IGC_I225_RX_LATENCY_2500   1485
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = IGC_MAX_RXD,
.nb_min = IGC_MIN_RXD,
@@ -245,6 +255,18 @@ eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t 
vlan_id, int on);
 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
  enum rte_vlan_type vlan_type, uint16_t tpid);
+static int eth_igc_timesync_enable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_disable(struct rte_eth_dev *dev);
+static int eth_igc_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+static int eth_igc_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int eth_igc_timesync_adjust_time(struct rte_eth_dev *dev, int64_t 
delta);
+static int eth_igc_timesync_read_time(struct rte_eth_dev *dev,
+ struct timespec *timestamp);
+static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
+  const struct timespec *timestamp);
 
 static const struct eth_dev_ops eth_igc_ops = {
.dev_configure  = eth_igc_configure,
@@ -298,6 +320,13 @@ static const struct eth_dev_ops eth_igc_ops = {
.vlan_tpid_set  = eth_igc_vlan_tpid_set,
.vlan_strip_queue_set   = eth_igc_vlan_strip_queue_set,
.flow_ops_get   = eth_igc_flow_ops_get,
+   .timesync_enable= eth_igc_timesync_enable,
+   .timesync_disable   = eth_igc_timesync_disable,
+   .timesync_read_rx_timestamp = eth_igc_timesync_read_rx_timestamp,
+   .timesync_read_tx_timestamp = eth_igc_timesync_read_tx_timestamp,
+   .timesync_adjust_time   = eth_igc_timesync_adjust_time,
+   .timesync_read_time = eth_igc_timesync_read_time,
+   .timesync_write_time= eth_igc_timesync_write_time,
 };
 
 /*
@@ -2582,6 +2611,199 @@ eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
 }
 
 static int
+eth_igc_timesync_enable(struct rte_eth_dev *dev)
+{
+   struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+   struct timespec system_time;
+   struct igc_rx_queue *rxq;
+   uint32_t val;
+   uint16_t i;
+
+   IGC_WRITE_REG(hw, IGC_TSAUXC, 0x0);
+
+   clock_gettime(CLOCK_REALTIME, &system_time);
+   IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+   IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+   /* Enable timestamping of received PTP packets. */
+   val = IGC_READ_REG(hw, IGC_RXPBS);
+   val |= IGC_RXPBS_CFG_TS_EN;
+   IGC_WRITE_REG(hw, IGC_RXPBS, val);
+
+   for (i = 

RE: [PATCH] net/i40e: rework maximum frame size configuration

2023-01-20 Thread Su, Simei
Hi David,

> -Original Message-
> From: David Marchand 
> Sent: Friday, January 20, 2023 3:34 PM
> To: Su, Simei 
> Cc: Xing, Beilei ; Zhang, Yuying
> ; dev@dpdk.org; Zhang, Qi Z
> ; Yang, Qiming ;
> sta...@dpdk.org; Zhang, Helin 
> Subject: Re: [PATCH] net/i40e: rework maximum frame size configuration
> 
> On Mon, Jan 16, 2023 at 1:15 PM Su, Simei  wrote:
> >
> > Hi David,
> >
> > > -Original Message-
> > > From: David Marchand 
> > > Sent: Monday, January 16, 2023 7:19 PM
> > > To: Su, Simei 
> > > Cc: Xing, Beilei ; Zhang, Yuying
> > > ; dev@dpdk.org; Zhang, Qi Z
> > > ; Yang, Qiming ;
> > > sta...@dpdk.org; Zhang, Helin 
> > > Subject: Re: [PATCH] net/i40e: rework maximum frame size
> > > configuration
> > >
> > > On Mon, Jan 16, 2023 at 11:54 AM Simei Su  wrote:
> > > >
> > > > This patch removes unnecessary link status check.
> > > >
> > > > Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port
> > > > level")
> > > > Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port
> > > > level")
> > > > Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
> > > > Cc: sta...@dpdk.org
> > > >
> > > > Signed-off-by: Simei Su 
> > >
> > > Thanks for looking into the issue.
> > >
> > > This is rather close to what I had tried [1] along my original
> > > report, but it failed in the CI.
> > > Let's see how the validation of your patch goes.
> > >
> > > 1:
> > >
> https://patchwork.dpdk.org/project/dpdk/patch/20221212143715.29649-1
> > > -d
> > > avid.march...@redhat.com/
> > >
> >
> > OK. We will find one environment to see why the unit test failed.
> 
> Any update?

We can reproduce the CI error, but only after bringing the interface up with
"ifconfig interface up"; without that step the error does not appear. The
root cause has not been found yet. We will discuss how to handle it more
reasonably after Chinese New Year.

> 
> 
> --
> David Marchand



RE: [PATCH] net/i40e: rework maximum frame size configuration

2023-01-20 Thread Su, Simei

> -Original Message-
> From: David Marchand 
> Sent: Friday, January 20, 2023 10:47 PM
> To: Su, Simei 
> Cc: Xing, Beilei ; Zhang, Yuying
> ; dev@dpdk.org; Zhang, Qi Z
> ; Yang, Qiming ;
> sta...@dpdk.org; Zhang, Helin ; Mcnamara, John
> 
> Subject: Re: [PATCH] net/i40e: rework maximum frame size configuration
> 
> On Fri, Jan 20, 2023 at 2:58 PM Su, Simei  wrote:
> >
> > Hi David,
> >
> > > -Original Message-
> > > From: David Marchand 
> > > Sent: Friday, January 20, 2023 3:34 PM
> > > To: Su, Simei 
> > > Cc: Xing, Beilei ; Zhang, Yuying
> > > ; dev@dpdk.org; Zhang, Qi Z
> > > ; Yang, Qiming ;
> > > sta...@dpdk.org; Zhang, Helin 
> > > Subject: Re: [PATCH] net/i40e: rework maximum frame size
> > > configuration
> > >
> > > On Mon, Jan 16, 2023 at 1:15 PM Su, Simei  wrote:
> > > >
> > > > Hi David,
> > > >
> > > > > -Original Message-
> > > > > From: David Marchand 
> > > > > Sent: Monday, January 16, 2023 7:19 PM
> > > > > To: Su, Simei 
> > > > > Cc: Xing, Beilei ; Zhang, Yuying
> > > > > ; dev@dpdk.org; Zhang, Qi Z
> > > > > ; Yang, Qiming ;
> > > > > sta...@dpdk.org; Zhang, Helin 
> > > > > Subject: Re: [PATCH] net/i40e: rework maximum frame size
> > > > > configuration
> > > > >
> > > > > On Mon, Jan 16, 2023 at 11:54 AM Simei Su 
> wrote:
> > > > > >
> > > > > > This patch removes unnecessary link status check.
> > > > > >
> > > > > > Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at
> > > > > > port
> > > > > > level")
> > > > > > Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at
> > > > > > port
> > > > > > level")
> > > > > > Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
> > > > > > Cc: sta...@dpdk.org
> > > > > >
> > > > > > Signed-off-by: Simei Su 
> > > > >
> > > > > Thanks for looking into the issue.
> > > > >
> > > > > This is rather close to what I had tried [1] along my original
> > > > > report, but it failed in the CI.
> > > > > Let's see how the validation of your patch goes.
> > > > >
> > > > > 1:
> > > > >
> > >
> https://patchwork.dpdk.org/project/dpdk/patch/20221212143715.29649-1
> > > > > -d
> > > > > avid.march...@redhat.com/
> > > > >
> > > >
> > > > OK. We will find one environment to see why the unit test failed.
> > >
> > > Any update?
> >
> > We can reproduce the CI error, but only after bringing the interface up
> > with "ifconfig interface up"; without that step the error does not appear.
> > The root cause has not been found yet. We will discuss how to handle it
> > more reasonably after Chinese New Year.
> 
> We have regressions in stable releases.
> Please put priority when the team is back so that this topic is fixed in 
> 23.03.
> 
> 
> Thanks.
> 
> --
> David Marchand

Sorry for any inconvenience. We will handle it as soon as possible when we
are back and fix it in 23.03.

Thanks,
Simei




[PATCH v2] net/i40e: rework maximum frame size configuration

2023-01-30 Thread Simei Su
This patch removes the unnecessary link status check and adds a link update.

Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port level")
Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port level")
Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
v2:
* Refine commit log.
* Add link update.

 drivers/net/i40e/i40e_ethdev.c | 54 +++---
 1 file changed, 14 insertions(+), 40 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d..a3100ec 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev 
*dev,
  struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
@@ -2467,8 +2466,18 @@ i40e_dev_start(struct rte_eth_dev *dev)
"please call hierarchy_commit() "
"before starting the port");
 
-   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-   i40e_set_mac_max_frame(dev, max_frame_size);
+   i40e_dev_link_update(dev, 1);
+
+   max_frame_size = dev->data->mtu ?
+   dev->data->mtu + I40E_ETH_OVERHEAD :
+   I40E_FRAME_SIZE_MAX;
+
+   /* Set the max frame size to HW*/
+   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to set mac config");
+   return ret;
+   }
 
return I40E_SUCCESS;
 
@@ -2809,9 +2818,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
-#define CHECK_INTERVAL 100  /* 100ms */
-#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
-
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -2878,6 +2884,8 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
bool enable_lse, int wait_to_complete)
 {
+#define CHECK_INTERVAL 100  /* 100ms */
+#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
int status;
@@ -12123,40 +12131,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-   uint32_t rep_cnt = MAX_REPEAT_TIME;
-   struct rte_eth_link link;
-   enum i40e_status_code status;
-   bool can_be_set = true;
-
-   /*
-* I40E_MEDIA_TYPE_BASET link up can be ignored
-* I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-* is I40E_MEDIA_TYPE_UNKNOWN
-*/
-   if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-   hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-   do {
-   update_link_reg(hw, &link);
-   if (link.link_status)
-   break;
-   rte_delay_ms(CHECK_INTERVAL);
-   } while (--rep_cnt);
-   can_be_set = !!link.link_status;
-   }
-
-   if (can_be_set) {
-   status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-   if (status != I40E_SUCCESS)
-   PMD_DRV_LOG(ERR, "Failed to set max frame size at port 
level");
-   } else {
-   PMD_DRV_LOG(ERR, "Set max frame size at port level not 
applicable on link down");
-   }
-}
-
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-- 
2.9.5



[PATCH v3] net/i40e: rework maximum frame size configuration

2023-01-31 Thread Simei Su
This patch removes the unnecessary link status check and adds a link update.

Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port level")
Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port level")
Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
v3:
* Put link update before interrupt enable.

v2:
* Refine commit log.
* Add link update.

 drivers/net/i40e/i40e_ethdev.c | 58 +++---
 1 file changed, 15 insertions(+), 43 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d..f5a6cec 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev 
*dev,
  struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
@@ -2447,11 +2446,11 @@ i40e_dev_start(struct rte_eth_dev *dev)
   I40E_AQ_EVENT_MEDIA_NA), NULL);
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
-
-   /* Call get_link_info aq command to enable/disable LSE */
-   i40e_dev_link_update(dev, 0);
}
 
+   /* Call get_link_info aq command to enable/disable LSE */
+   i40e_dev_link_update(dev, 1);
+
if (dev->data->dev_conf.intr_conf.rxq == 0) {
rte_eal_alarm_set(I40E_ALARM_INTERVAL,
  i40e_dev_alarm_handler, dev);
@@ -2467,8 +2466,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
"please call hierarchy_commit() "
"before starting the port");
 
-   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-   i40e_set_mac_max_frame(dev, max_frame_size);
+   max_frame_size = dev->data->mtu ?
+   dev->data->mtu + I40E_ETH_OVERHEAD :
+   I40E_FRAME_SIZE_MAX;
+
+   /* Set the max frame size to HW*/
+   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to set mac config");
+   return ret;
+   }
 
return I40E_SUCCESS;
 
@@ -2809,9 +2816,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
-#define CHECK_INTERVAL 100  /* 100ms */
-#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
-
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -2878,6 +2882,8 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
bool enable_lse, int wait_to_complete)
 {
+#define CHECK_INTERVAL 100  /* 100ms */
+#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
int status;
@@ -12123,40 +12129,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-   uint32_t rep_cnt = MAX_REPEAT_TIME;
-   struct rte_eth_link link;
-   enum i40e_status_code status;
-   bool can_be_set = true;
-
-   /*
-* I40E_MEDIA_TYPE_BASET link up can be ignored
-* I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-* is I40E_MEDIA_TYPE_UNKNOWN
-*/
-   if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-   hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-   do {
-   update_link_reg(hw, &link);
-   if (link.link_status)
-   break;
-   rte_delay_ms(CHECK_INTERVAL);
-   } while (--rep_cnt);
-   can_be_set = !!link.link_status;
-   }
-
-   if (can_be_set) {
-   status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-   if (status != I40E_SUCCESS)
-   PMD_DRV_LOG(ERR, "Failed to set max frame size at port 
level");
-   } else {
-   PMD_DRV_LOG(ERR, "Set max frame size at port level not 
applicable on link down");
-   }
-}
-
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-- 
2.9.5



[PATCH v2 0/2] net/igc: support launch time offloading

2023-01-31 Thread Simei Su
[PATCH v2 1/2] expose packet pacing registers
[PATCH v2 2/2] enable launch time offloading

v2:
* Refine title and commit log.
* Add release notes.
* Rename variable name.

Simei Su (2):
  net/igc/base: expose packet pacing registers
  net/igc: enable launch time offloading

 doc/guides/rel_notes/release_23_03.rst |  2 +-
 drivers/net/igc/base/igc_defines.h |  9 +
 drivers/net/igc/base/igc_regs.h|  8 
 drivers/net/igc/igc_ethdev.c   | 70 ++
 drivers/net/igc/igc_ethdev.h   |  6 ++-
 drivers/net/igc/igc_txrx.c | 58 
 drivers/net/igc/igc_txrx.h |  3 ++
 7 files changed, 146 insertions(+), 10 deletions(-)

-- 
2.9.5



[PATCH v2 1/2] net/igc/base: expose packet pacing registers

2023-01-31 Thread Simei Su
Add definitions for the packet pacing (launch time offloading) related
registers.

Signed-off-by: Simei Su 
---
 drivers/net/igc/base/igc_defines.h | 9 +
 drivers/net/igc/base/igc_regs.h| 8 
 2 files changed, 17 insertions(+)

diff --git a/drivers/net/igc/base/igc_defines.h 
b/drivers/net/igc/base/igc_defines.h
index dd7330a..280570b 100644
--- a/drivers/net/igc/base/igc_defines.h
+++ b/drivers/net/igc/base/igc_defines.h
@@ -188,6 +188,15 @@
 #define IGC_RCTL_BSEX  0x0200 /* Buffer size extension */
 #define IGC_RCTL_SECRC 0x0400 /* Strip Ethernet CRC */
 
+#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_TXPBSIZE_TSN   0x04145145 /* 5k bytes buffer for each queue */
+
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x0001
+#define IGC_TQAVCTRL_ENHANCED_QAV  0x0008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT  0x0001
+
 /* Use byte values for the following shift parameters
  * Usage:
  * psrctl |= (((ROUNDUP(value0, 128) >> IGC_PSRCTL_BSIZE0_SHIFT) &
diff --git a/drivers/net/igc/base/igc_regs.h b/drivers/net/igc/base/igc_regs.h
index d424387..e423814 100644
--- a/drivers/net/igc/base/igc_regs.h
+++ b/drivers/net/igc/base/igc_regs.h
@@ -602,6 +602,14 @@
 #define IGC_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
 #define IGC_RXUDP  0x0B638 /* Time Sync Rx UDP Port - RW */
 
+#define IGC_QBVCYCLET  0x331C
+#define IGC_QBVCYCLET_S 0x3320
+#define IGC_STQT(_n)   (0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n)  (0x3334 + 0x4 * (_n))
+#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_BASET_L0x3314
+#define IGC_BASET_H0x3318
+
 /* Filtering Registers */
 #define IGC_SAQF(_n)   (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
 #define IGC_DAQF(_n)   (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
-- 
2.9.5



[PATCH v2 2/2] net/igc: enable launch time offloading

2023-01-31 Thread Simei Su
LaunchTime defines when a packet is scheduled out of the packet buffer to
the MAC. The launch time of each packet is specified as an offset relative
to the BaseT registers, and BaseT is automatically advanced every cycle.

This patch supports Tx timestamp based packet pacing by leveraging the
offload flag "RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP": the expected launch
time is written into the advanced transmit descriptor.
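
For context, a minimal sketch (not taken from this patch) of how an absolute
transmit timestamp can be reduced to the launch time offset the descriptor
expects; base_time and cycle_time mirror the adapter fields this series adds:

/* Sketch: the launch time stored in the descriptor is the offset of the
 * requested Tx time inside the running BaseT cycle. */
static inline uint32_t
launchtime_offset(uint64_t txtime, uint64_t base_time, uint64_t cycle_time)
{
	return (uint32_t)((txtime - base_time) % cycle_time);
}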

Signed-off-by: Simei Su 
---
 doc/guides/rel_notes/release_23_03.rst |  2 +-
 drivers/net/igc/igc_ethdev.c   | 70 ++
 drivers/net/igc/igc_ethdev.h   |  6 ++-
 drivers/net/igc/igc_txrx.c | 58 
 drivers/net/igc/igc_txrx.h |  3 ++
 5 files changed, 129 insertions(+), 10 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_03.rst 
b/doc/guides/rel_notes/release_23_03.rst
index d175f8e..5bca6af 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -79,7 +79,7 @@ New Features
 * **Updated Intel igc driver.**
 
   * Added timesync API support.
-
+  * Added packet pacing(launch time offloading) support.
 
 Removed Items
 -
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index ef3346b..fab2ab6 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -88,6 +88,9 @@
 #define IGC_I225_RX_LATENCY_1000   300
 #define IGC_I225_RX_LATENCY_2500   1485
 
+uint64_t igc_tx_timestamp_dynflag;
+int igc_tx_timestamp_dynfield_offset = -1;
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = IGC_MAX_RXD,
.nb_min = IGC_MIN_RXD,
@@ -267,6 +270,7 @@ static int eth_igc_timesync_read_time(struct rte_eth_dev 
*dev,
  struct timespec *timestamp);
 static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
   const struct timespec *timestamp);
+static int eth_igc_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
 
 static const struct eth_dev_ops eth_igc_ops = {
.dev_configure  = eth_igc_configure,
@@ -327,6 +331,7 @@ static const struct eth_dev_ops eth_igc_ops = {
.timesync_adjust_time   = eth_igc_timesync_adjust_time,
.timesync_read_time = eth_igc_timesync_read_time,
.timesync_write_time= eth_igc_timesync_write_time,
+   .read_clock = eth_igc_read_clock,
 };
 
 /*
@@ -949,7 +954,12 @@ eth_igc_start(struct rte_eth_dev *dev)
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+   uint32_t nsec, sec, baset_l, baset_h, tqavctrl;
+   struct timespec system_time;
+   int64_t n, systime;
+   uint32_t txqctl = 0;
uint32_t *speeds;
+   uint16_t i;
int ret;
 
PMD_INIT_FUNC_TRACE();
@@ -1009,6 +1019,55 @@ eth_igc_start(struct rte_eth_dev *dev)
return ret;
}
 
+   if (igc_tx_timestamp_dynflag > 0) {
+   adapter->base_time = 0;
+   adapter->cycle_time = NSEC_PER_SEC;
+
+   IGC_WRITE_REG(hw, IGC_TSSDP, 0);
+   IGC_WRITE_REG(hw, IGC_TSIM, TSINTR_TXTS);
+   IGC_WRITE_REG(hw, IGC_IMS, IGC_ICR_TS);
+
+   IGC_WRITE_REG(hw, IGC_TSAUXC, 0);
+   IGC_WRITE_REG(hw, IGC_I350_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+   IGC_WRITE_REG(hw, IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+   tqavctrl = IGC_READ_REG(hw, IGC_I210_TQAVCTRL);
+   tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+   IGC_TQAVCTRL_ENHANCED_QAV;
+   IGC_WRITE_REG(hw, IGC_I210_TQAVCTRL, tqavctrl);
+
+   IGC_WRITE_REG(hw, IGC_QBVCYCLET_S, adapter->cycle_time);
+   IGC_WRITE_REG(hw, IGC_QBVCYCLET, adapter->cycle_time);
+
+   for (i = 0; i < dev->data->nb_tx_queues; i++) {
+   IGC_WRITE_REG(hw, IGC_STQT(i), 0);
+   IGC_WRITE_REG(hw, IGC_ENDQT(i), NSEC_PER_SEC);
+
+   txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+   IGC_WRITE_REG(hw, IGC_TXQCTL(i), txqctl);
+   }
+
+   clock_gettime(CLOCK_REALTIME, &system_time);
+   IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+   IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+   nsec = IGC_READ_REG(hw, IGC_SYSTIML);
+   sec = IGC_READ_REG(hw, IGC_SYSTIMH);
+   systime = (int64_t)sec * NSEC_PER_SEC + (int64_t)nsec;
+
+   if (systime > adapter->base_time) {
+   n = (systime - adapter->base_time) /
+adapter->cycle_time;
+   adapter->base_time = adapter->base_time +
+ 

[PATCH v3 0/2] net/igc: support launch time offloading

2023-02-01 Thread Simei Su
[PATCH v3 1/2] expose packet pacing registers
[PATCH v3 2/2] enable launch time offloading

v3:
* Fix coding style issue.

v2:
* Refine title and commit log.
* Add release notes.
* Rename variable name.

Simei Su (2):
  net/igc/base: expose packet pacing registers
  net/igc: enable launch time offloading

 doc/guides/rel_notes/release_23_03.rst |  2 +-
 drivers/net/igc/base/igc_defines.h |  9 +
 drivers/net/igc/base/igc_regs.h|  8 
 drivers/net/igc/igc_ethdev.c   | 70 ++
 drivers/net/igc/igc_ethdev.h   |  6 ++-
 drivers/net/igc/igc_txrx.c | 58 
 drivers/net/igc/igc_txrx.h |  3 ++
 7 files changed, 146 insertions(+), 10 deletions(-)

-- 
2.9.5



[PATCH v3 1/2] net/igc/base: expose packet pacing registers

2023-02-01 Thread Simei Su
Add definitions for the packet pacing (launch time offloading) related
registers.

Signed-off-by: Simei Su 
---
 drivers/net/igc/base/igc_defines.h | 9 +
 drivers/net/igc/base/igc_regs.h| 8 
 2 files changed, 17 insertions(+)

diff --git a/drivers/net/igc/base/igc_defines.h 
b/drivers/net/igc/base/igc_defines.h
index dd7330a..280570b 100644
--- a/drivers/net/igc/base/igc_defines.h
+++ b/drivers/net/igc/base/igc_defines.h
@@ -188,6 +188,15 @@
 #define IGC_RCTL_BSEX  0x0200 /* Buffer size extension */
 #define IGC_RCTL_SECRC 0x0400 /* Strip Ethernet CRC */
 
+#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
+#define IGC_TXPBSIZE_TSN   0x04145145 /* 5k bytes buffer for each queue */
+
+/* Transmit Scheduling */
+#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x0001
+#define IGC_TQAVCTRL_ENHANCED_QAV  0x0008
+
+#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT  0x0001
+
 /* Use byte values for the following shift parameters
  * Usage:
  * psrctl |= (((ROUNDUP(value0, 128) >> IGC_PSRCTL_BSIZE0_SHIFT) &
diff --git a/drivers/net/igc/base/igc_regs.h b/drivers/net/igc/base/igc_regs.h
index d424387..e423814 100644
--- a/drivers/net/igc/base/igc_regs.h
+++ b/drivers/net/igc/base/igc_regs.h
@@ -602,6 +602,14 @@
 #define IGC_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
 #define IGC_RXUDP  0x0B638 /* Time Sync Rx UDP Port - RW */
 
+#define IGC_QBVCYCLET  0x331C
+#define IGC_QBVCYCLET_S 0x3320
+#define IGC_STQT(_n)   (0x3324 + 0x4 * (_n))
+#define IGC_ENDQT(_n)  (0x3334 + 0x4 * (_n))
+#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_BASET_L0x3314
+#define IGC_BASET_H0x3318
+
 /* Filtering Registers */
 #define IGC_SAQF(_n)   (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
 #define IGC_DAQF(_n)   (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
-- 
2.9.5



[PATCH v3 2/2] net/igc: enable launch time offloading

2023-02-01 Thread Simei Su
LaunchTime defines when a packet is scheduled out of the packet buffer to
the MAC. The launch time of each packet is specified as an offset relative
to the BaseT registers, and BaseT is automatically advanced every cycle.

This patch supports Tx timestamp based packet pacing by leveraging the
offload flag "RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP": the expected launch
time is written into the advanced transmit descriptor.
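
As a usage note (not part of this patch), the application side is expected to
enable RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP in the Tx offloads and place the
desired launch time into the mbuf timestamp dynamic field before calling
rte_eth_tx_burst(). A hedged sketch using the standard registration helper
(function names below are illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf_dyn.h>

/* Sketch: application side of launch time offloading; error handling omitted. */
static int ts_off;        /* offset of the per-mbuf timestamp dynamic field */
static uint64_t ts_flag;  /* ol_flags bit that requests send-on-timestamp */

static void tx_timestamp_setup(void)
{
	rte_mbuf_dyn_tx_timestamp_register(&ts_off, &ts_flag);
}

static void tx_timestamp_set(struct rte_mbuf *m, uint64_t launch_ns)
{
	*RTE_MBUF_DYNFIELD(m, ts_off, uint64_t *) = launch_ns;
	m->ol_flags |= ts_flag;
}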

Signed-off-by: Simei Su 
---
 doc/guides/rel_notes/release_23_03.rst |  2 +-
 drivers/net/igc/igc_ethdev.c   | 70 ++
 drivers/net/igc/igc_ethdev.h   |  6 ++-
 drivers/net/igc/igc_txrx.c | 58 
 drivers/net/igc/igc_txrx.h |  3 ++
 5 files changed, 129 insertions(+), 10 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_03.rst 
b/doc/guides/rel_notes/release_23_03.rst
index d175f8e..5bca6af 100644
--- a/doc/guides/rel_notes/release_23_03.rst
+++ b/doc/guides/rel_notes/release_23_03.rst
@@ -79,7 +79,7 @@ New Features
 * **Updated Intel igc driver.**
 
   * Added timesync API support.
-
+  * Added packet pacing(launch time offloading) support.
 
 Removed Items
 -
diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index ef3346b..fab2ab6 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -88,6 +88,9 @@
 #define IGC_I225_RX_LATENCY_1000   300
 #define IGC_I225_RX_LATENCY_2500   1485
 
+uint64_t igc_tx_timestamp_dynflag;
+int igc_tx_timestamp_dynfield_offset = -1;
+
 static const struct rte_eth_desc_lim rx_desc_lim = {
.nb_max = IGC_MAX_RXD,
.nb_min = IGC_MIN_RXD,
@@ -267,6 +270,7 @@ static int eth_igc_timesync_read_time(struct rte_eth_dev 
*dev,
  struct timespec *timestamp);
 static int eth_igc_timesync_write_time(struct rte_eth_dev *dev,
   const struct timespec *timestamp);
+static int eth_igc_read_clock(struct rte_eth_dev *dev, uint64_t *clock);
 
 static const struct eth_dev_ops eth_igc_ops = {
.dev_configure  = eth_igc_configure,
@@ -327,6 +331,7 @@ static const struct eth_dev_ops eth_igc_ops = {
.timesync_adjust_time   = eth_igc_timesync_adjust_time,
.timesync_read_time = eth_igc_timesync_read_time,
.timesync_write_time= eth_igc_timesync_write_time,
+   .read_clock = eth_igc_read_clock,
 };
 
 /*
@@ -949,7 +954,12 @@ eth_igc_start(struct rte_eth_dev *dev)
struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
+   uint32_t nsec, sec, baset_l, baset_h, tqavctrl;
+   struct timespec system_time;
+   int64_t n, systime;
+   uint32_t txqctl = 0;
uint32_t *speeds;
+   uint16_t i;
int ret;
 
PMD_INIT_FUNC_TRACE();
@@ -1009,6 +1019,55 @@ eth_igc_start(struct rte_eth_dev *dev)
return ret;
}
 
+   if (igc_tx_timestamp_dynflag > 0) {
+   adapter->base_time = 0;
+   adapter->cycle_time = NSEC_PER_SEC;
+
+   IGC_WRITE_REG(hw, IGC_TSSDP, 0);
+   IGC_WRITE_REG(hw, IGC_TSIM, TSINTR_TXTS);
+   IGC_WRITE_REG(hw, IGC_IMS, IGC_ICR_TS);
+
+   IGC_WRITE_REG(hw, IGC_TSAUXC, 0);
+   IGC_WRITE_REG(hw, IGC_I350_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
+   IGC_WRITE_REG(hw, IGC_TXPBS, IGC_TXPBSIZE_TSN);
+
+   tqavctrl = IGC_READ_REG(hw, IGC_I210_TQAVCTRL);
+   tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
+   IGC_TQAVCTRL_ENHANCED_QAV;
+   IGC_WRITE_REG(hw, IGC_I210_TQAVCTRL, tqavctrl);
+
+   IGC_WRITE_REG(hw, IGC_QBVCYCLET_S, adapter->cycle_time);
+   IGC_WRITE_REG(hw, IGC_QBVCYCLET, adapter->cycle_time);
+
+   for (i = 0; i < dev->data->nb_tx_queues; i++) {
+   IGC_WRITE_REG(hw, IGC_STQT(i), 0);
+   IGC_WRITE_REG(hw, IGC_ENDQT(i), NSEC_PER_SEC);
+
+   txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
+   IGC_WRITE_REG(hw, IGC_TXQCTL(i), txqctl);
+   }
+
+   clock_gettime(CLOCK_REALTIME, &system_time);
+   IGC_WRITE_REG(hw, IGC_SYSTIML, system_time.tv_nsec);
+   IGC_WRITE_REG(hw, IGC_SYSTIMH, system_time.tv_sec);
+
+   nsec = IGC_READ_REG(hw, IGC_SYSTIML);
+   sec = IGC_READ_REG(hw, IGC_SYSTIMH);
+   systime = (int64_t)sec * NSEC_PER_SEC + (int64_t)nsec;
+
+   if (systime > adapter->base_time) {
+   n = (systime - adapter->base_time) /
+adapter->cycle_time;
+   adapter->base_time = adapter->base_time +
+ 

[PATCH v4] net/i40e: rework maximum frame size configuration

2023-02-02 Thread Simei Su
This patch reverts the changes mentioned below to remove the unnecessary
link status check, and only moves the max frame size configuration to
dev_start. It also sets the "wait to complete" parameter to true so that
the link update completes right after the link is set up.

Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port level")
Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port level")
Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
v4:
* Refine commit log.
* Avoid duplicate call to set parameter "wait to complete" true.

v3:
* Put link update before interrupt enable.

v2:
* Refine commit log.
* Add link update.

 drivers/net/i40e/i40e_ethdev.c | 54 ++
 1 file changed, 13 insertions(+), 41 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d..5d57bb9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev 
*dev,
  struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
@@ -2449,7 +2448,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
/* Call get_link_info aq command to enable/disable LSE */
-   i40e_dev_link_update(dev, 0);
+   i40e_dev_link_update(dev, 1);
}
 
if (dev->data->dev_conf.intr_conf.rxq == 0) {
@@ -2467,8 +2466,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
"please call hierarchy_commit() "
"before starting the port");
 
-   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-   i40e_set_mac_max_frame(dev, max_frame_size);
+   max_frame_size = dev->data->mtu ?
+   dev->data->mtu + I40E_ETH_OVERHEAD :
+   I40E_FRAME_SIZE_MAX;
+
+   /* Set the max frame size to HW*/
+   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to set mac config");
+   return ret;
+   }
 
return I40E_SUCCESS;
 
@@ -2809,9 +2816,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
-#define CHECK_INTERVAL 100  /* 100ms */
-#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
-
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -2878,6 +2882,8 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
bool enable_lse, int wait_to_complete)
 {
+#define CHECK_INTERVAL 100  /* 100ms */
+#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
int status;
@@ -12123,40 +12129,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-   uint32_t rep_cnt = MAX_REPEAT_TIME;
-   struct rte_eth_link link;
-   enum i40e_status_code status;
-   bool can_be_set = true;
-
-   /*
-* I40E_MEDIA_TYPE_BASET link up can be ignored
-* I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-* is I40E_MEDIA_TYPE_UNKNOWN
-*/
-   if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-   hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-   do {
-   update_link_reg(hw, &link);
-   if (link.link_status)
-   break;
-   rte_delay_ms(CHECK_INTERVAL);
-   } while (--rep_cnt);
-   can_be_set = !!link.link_status;
-   }
-
-   if (can_be_set) {
-   status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-   if (status != I40E_SUCCESS)
-   PMD_DRV_LOG(ERR, "Failed to set max frame size at port 
level");
-   } else {
-   PMD_DRV_LOG(ERR, "Set max frame size at port level not 
applicable on link down");
-   }
-}
-
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-- 
2.9.5



[PATCH v5] net/i40e: rework maximum frame size configuration

2023-02-02 Thread Simei Su
This patch reverts the changes mentioned below to remove the unnecessary
link status check, and only moves the max frame size configuration to
dev_start. It also sets the "wait to complete" parameter to true so that
the link update completes right after the link is set up.

Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port level")
Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port level")
Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
Cc: sta...@dpdk.org

Signed-off-by: Simei Su 
---
v5:
* Fix misspelling in commit log.

v4:
* Refine commit log.
* Avoid duplicate call to set parameter "wait to complete" true.

v3:
* Put link update before interrupt enable.

v2:
* Refine commit log.
* Add link update.

 drivers/net/i40e/i40e_ethdev.c | 54 ++
 1 file changed, 13 insertions(+), 41 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7726a89d..5d57bb9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -387,7 +387,6 @@ static int i40e_set_default_mac_addr(struct rte_eth_dev 
*dev,
  struct rte_ether_addr *mac_addr);
 
 static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size);
 
 static int i40e_ethertype_filter_convert(
const struct rte_eth_ethertype_filter *input,
@@ -2449,7 +2448,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
 
/* Call get_link_info aq command to enable/disable LSE */
-   i40e_dev_link_update(dev, 0);
+   i40e_dev_link_update(dev, 1);
}
 
if (dev->data->dev_conf.intr_conf.rxq == 0) {
@@ -2467,8 +2466,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
"please call hierarchy_commit() "
"before starting the port");
 
-   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
-   i40e_set_mac_max_frame(dev, max_frame_size);
+   max_frame_size = dev->data->mtu ?
+   dev->data->mtu + I40E_ETH_OVERHEAD :
+   I40E_FRAME_SIZE_MAX;
+
+   /* Set the max frame size to HW*/
+   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0, NULL);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to set mac config");
+   return ret;
+   }
 
return I40E_SUCCESS;
 
@@ -2809,9 +2816,6 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
 }
 
-#define CHECK_INTERVAL 100  /* 100ms */
-#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
-
 static __rte_always_inline void
 update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
 {
@@ -2878,6 +2882,8 @@ static __rte_always_inline void
 update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
bool enable_lse, int wait_to_complete)
 {
+#define CHECK_INTERVAL 100  /* 100ms */
+#define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
int status;
@@ -12123,40 +12129,6 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
return ret;
 }
 
-static void
-i40e_set_mac_max_frame(struct rte_eth_dev *dev, uint16_t size)
-{
-   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-   uint32_t rep_cnt = MAX_REPEAT_TIME;
-   struct rte_eth_link link;
-   enum i40e_status_code status;
-   bool can_be_set = true;
-
-   /*
-* I40E_MEDIA_TYPE_BASET link up can be ignored
-* I40E_MEDIA_TYPE_BASET link down that hw->phy.media_type
-* is I40E_MEDIA_TYPE_UNKNOWN
-*/
-   if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
-   hw->phy.media_type != I40E_MEDIA_TYPE_UNKNOWN) {
-   do {
-   update_link_reg(hw, &link);
-   if (link.link_status)
-   break;
-   rte_delay_ms(CHECK_INTERVAL);
-   } while (--rep_cnt);
-   can_be_set = !!link.link_status;
-   }
-
-   if (can_be_set) {
-   status = i40e_aq_set_mac_config(hw, size, TRUE, 0, false, NULL);
-   if (status != I40E_SUCCESS)
-   PMD_DRV_LOG(ERR, "Failed to set max frame size at port 
level");
-   } else {
-   PMD_DRV_LOG(ERR, "Set max frame size at port level not 
applicable on link down");
-   }
-}
-
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(i40e_logtype_driver, driver, NOTICE);
 #ifdef RTE_ETHDEV_DEBUG_RX
-- 
2.9.5



RE: [PATCH v3 2/2] net/igc: enable launch time offloading

2023-02-02 Thread Su, Simei
Hi Stephen,

> -Original Message-
> From: Stephen Hemminger 
> Sent: Friday, February 3, 2023 8:31 AM
> To: Su, Simei 
> Cc: Zhang, Qi Z ; Guo, Junfeng
> ; dev@dpdk.org; Wu, Wenjun1
> 
> Subject: Re: [PATCH v3 2/2] net/igc: enable launch time offloading
> 
> On Thu,  2 Feb 2023 15:18:01 +0800
> Simei Su  wrote:
> 
> >
> > +static uint32_t igc_tx_launchtime(uint64_t txtime, uint16_t port_id)
> > +{
> > +   struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> > +   struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
> > +   uint64_t base_time = adapter->base_time;
> > +   uint64_t cycle_time = adapter->cycle_time;
> > +   uint32_t launchtime;
> > +
> > +   launchtime = (txtime - base_time) % cycle_time;
> > +
> > +   return rte_cpu_to_le_32(launchtime); }
> 
> 
> Divide in transmit path will slow things down.
> Better to use something like rte_reciprocal_divide_64() to avoid slow 64 bit
> divide.

Thanks for your comments.
Performance is not affected when the launch time function is turned off.
When the function is turned on, this path does not need to be highly
optimized given the nature of the feature.

Thanks,
Simei
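
For reference, a hedged sketch of the reciprocal-divide approach mentioned
above, assuming DPDK's rte_reciprocal.h helpers; the reciprocal of the cycle
time would be computed once when it is configured, so the per-packet modulo
avoids a hardware 64-bit divide:

#include <rte_reciprocal.h>

/* Sketch: avoid the per-packet 64-bit divide by precomputing 1/cycle_time. */
static struct rte_reciprocal_u64 cycle_recip;

static void set_cycle_time(uint64_t cycle_time)
{
	cycle_recip = rte_reciprocal_value_u64(cycle_time);
}

static inline uint32_t
launchtime_no_divide(uint64_t txtime, uint64_t base_time, uint64_t cycle_time)
{
	uint64_t delta = txtime - base_time;
	uint64_t q = rte_reciprocal_divide_u64(delta, &cycle_recip);

	return (uint32_t)(delta - q * cycle_time); /* == delta % cycle_time */
}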


RE: [PATCH v5] net/i40e: rework maximum frame size configuration

2023-02-02 Thread Su, Simei
Hi David,

> -Original Message-
> From: David Marchand 
> Sent: Thursday, February 2, 2023 8:56 PM
> To: Su, Simei ; Zhang, Qi Z 
> Cc: Xing, Beilei ; Zhang, Yuying
> ; dev@dpdk.org; Yang, Qiming
> ; sta...@dpdk.org
> Subject: Re: [PATCH v5] net/i40e: rework maximum frame size configuration
> 
> On Thu, Feb 2, 2023 at 1:37 PM Simei Su  wrote:
> >
> > This patch reverts mentioned changes below to remove unnecessary link
> > status check and only moves max frame size configuration to dev_start.
> > Also, it sets the parameter "wait to complete" true to wait for
> > complete right after setting link up.
> 
> Why is the change on link status needed?
> Is it necessary?

Indeed, it doesn't change the link status; it only triggers a link update and
waits for it to complete.
Sorry for not describing the "wait to complete" setting correctly.

> 
> >
> > Fixes: a4ba77367923 ("net/i40e: enable maximum frame size at port
> > level")
> > Fixes: 2184f7cdeeaa ("net/i40e: fix max frame size config at port
> > level")
> > Fixes: 719469f13b11 ("net/i40e: fix jumbo frame Rx with X722")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Simei Su 
> 
> I would have preferred you reply to my original report.
> At least, I'd like you add some credit with my name in the commitlog.

Sorry, I will take care of it in the next version.

Thanks,
Simei

> 
> 
> For the record, the differences with my v1 are:
> 
> diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
> index 5635dd03cf..5d57bb9a0e 100644
> --- a/drivers/net/i40e/i40e_ethdev.c
> +++ b/drivers/net/i40e/i40e_ethdev.c
> @@ -2327,6 +2327,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> uint32_t intr_vector = 0;
> struct i40e_vsi *vsi;
> uint16_t nb_rxq, nb_txq;
> +   uint16_t max_frame_size;
> 
> hw->adapter_stopped = 0;
> 
> @@ -2447,7 +2448,7 @@ i40e_dev_start(struct rte_eth_dev *dev)
> PMD_DRV_LOG(WARNING, "Fail to set phy
> mask");
> 
> /* Call get_link_info aq command to enable/disable LSE
> */
> -   i40e_dev_link_update(dev, 0);
> +   i40e_dev_link_update(dev, 1);
> }
> 
> if (dev->data->dev_conf.intr_conf.rxq == 0) { @@ -2465,8 +2466,16
> @@ i40e_dev_start(struct rte_eth_dev *dev)
> "please call hierarchy_commit() "
> "before starting the port");
> 
> -   i40e_aq_set_mac_config(hw, dev->data->mtu +
> I40E_ETH_OVERHEAD, TRUE,
> -   false, 0, NULL);
> +   max_frame_size = dev->data->mtu ?
> +   dev->data->mtu + I40E_ETH_OVERHEAD :
> +   I40E_FRAME_SIZE_MAX;
> +
> +   /* Set the max frame size to HW*/
> +   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0,
> NULL);
> +   if (ret) {
> +   PMD_DRV_LOG(ERR, "Fail to set mac config");
> +   return ret;
> +   }
> 
> return I40E_SUCCESS;
> 
> 
> Qi, don't apply this fix yet.
> I'll generate some binaries internally to have Red Hat QE run their tests.
> 
> 
> Thanks.
> 
> --
> David Marchand



RE: [PATCH v5] net/i40e: rework maximum frame size configuration

2023-02-02 Thread Su, Simei
Hi David,

> -Original Message-
> From: David Marchand 
> Sent: Thursday, February 2, 2023 9:24 PM
> To: Su, Simei 
> Cc: Xing, Beilei ; Zhang, Yuying
> ; dev@dpdk.org; Zhang, Qi Z
> ; Yang, Qiming ;
> sta...@dpdk.org
> Subject: Re: [PATCH v5] net/i40e: rework maximum frame size configuration
> 
> On Thu, Feb 2, 2023 at 1:37 PM Simei Su  wrote:
> > @@ -2467,8 +2466,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
> > "please call hierarchy_commit() "
> > "before starting the port");
> >
> > -   max_frame_size = dev->data->mtu + I40E_ETH_OVERHEAD;
> > -   i40e_set_mac_max_frame(dev, max_frame_size);
> > +   max_frame_size = dev->data->mtu ?
> > +   dev->data->mtu + I40E_ETH_OVERHEAD :
> > +   I40E_FRAME_SIZE_MAX;
> > +
> > +   /* Set the max frame size to HW*/
> > +   ret = i40e_aq_set_mac_config(hw, max_frame_size, TRUE, false, 0,
> NULL);
> > +   if (ret) {
> > +   PMD_DRV_LOG(ERR, "Fail to set mac config");
> > +   return ret;
> > +   }
> 
> Reading this patch again.
> 
> Returning here seems incorrect as we leave rx/tx queue in started state.
> Don't we need to jump to tx_err label on error?

Yes, it is my mistake that the code returns here incorrectly. I will fix it in the next
version.

Thanks,
Simei

> 
> >
> > return I40E_SUCCESS;
> >
> 
> --
> David Marchand



[PATCH v2] common/idpf: refactor single queue Tx function

2023-09-04 Thread Simei Su
This patch replaces the flex Tx descriptor with the base Tx descriptor to align
with kernel driver practice.

Signed-off-by: Simei Su 
---
v2:
* Refine commit title and commit log.
* Remove redundant definition.
* Modify base mode context TSO descriptor.

 drivers/common/idpf/idpf_common_rxtx.c| 76 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 4 files changed, 73 insertions(+), 44 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..01a8685ea3 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -823,6 +823,37 @@ idpf_calc_context_desc(uint64_t flags)
return 0;
 }
 
+/* set TSO context descriptor for single queue
+ */
+static inline void
+idpf_set_singleq_tso_ctx(struct rte_mbuf *mbuf,
+   union idpf_tx_offload tx_offload,
+   volatile struct idpf_base_tx_ctx_desc *ctx_desc)
+{
+   uint32_t tso_len;
+   uint8_t hdr_len;
+   uint64_t qw1;
+
+   if (tx_offload.l4_len == 0) {
+   TX_LOG(DEBUG, "L4 length set to 0");
+   return;
+   }
+
+   hdr_len = tx_offload.l2_len +
+   tx_offload.l3_len +
+   tx_offload.l4_len;
+   tso_len = mbuf->pkt_len - hdr_len;
+   qw1 = (uint64_t)IDPF_TX_DESC_DTYPE_CTX;
+
+   qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
+   qw1 |= ((uint64_t)tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
+   IDPF_TXD_CTX_QW1_TSO_LEN_M;
+   qw1 |= ((uint64_t)mbuf->tso_segsz << IDPF_TXD_CTX_QW1_MSS_S) &
+   IDPF_TXD_CTX_QW1_MSS_M;
+
+   ctx_desc->qw1 = rte_cpu_to_le_64(qw1);
+}
+
 /* set TSO context descriptor
  */
 static inline void
@@ -1307,17 +1338,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1361,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1374,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -13

[PATCH v3] common/idpf: refactor single queue Tx function

2023-09-08 Thread Simei Su
This patch replaces the flex Tx descriptor with the base Tx descriptor to align
with kernel driver practice.

Signed-off-by: Simei Su 
---
v3:
* Change context TSO descriptor from base mode to flex mode.

v2:
* Refine commit title and commit log.
* Remove redundant definition.
* Modify base mode context TSO descriptor.

 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 4 files changed, 39 insertions(+), 41 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..e6d2486272 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1343,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
+   uint32_t td_offset;
uint64_t ol_flags;
uint16_t tx_last;
uint16_t nb_used;
@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
+   td_offset = 0;
 
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
-   txd->qw1.buf_size = slen;
-   txd->qw1.cmd_dtype = 
rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA <<
- 

[PATCH v4 0/3] refactor single queue Tx data path

2023-09-13 Thread Simei Su
1. Refine single queue Tx data path for idpf common module.
2. Refine Tx queue setup for idpf pmd.
3. Refine Tx queue setup for cpfl pmd.

v4:
* Split one patch into patchset.
* Refine commit title and commit log.

v3:
* Change context TSO descriptor from base mode to flex mode.

v2:
* Refine commit title and commit log.
* Remove redundant definition.
* Modify base mode context TSO descriptor.

Simei Su (3):
  common/idpf: refactor single queue Tx data path
  net/idpf: refine Tx queue setup
  net/cpfl: refine Tx queue setup

 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 drivers/net/cpfl/cpfl_rxtx.c  |  2 +-
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 5 files changed, 40 insertions(+), 42 deletions(-)

-- 
2.25.1



[PATCH v4 2/3] net/idpf: refine Tx queue setup

2023-09-13 Thread Simei Su
This patch refines Tx single queue setup to align with Tx data path.

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
---
 drivers/net/idpf/idpf_rxtx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 3e3d81ca6d..64f2235580 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -74,7 +74,7 @@ idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t 
queue_idx,
ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_flex_tx_sched_desc),
  IDPF_DMA_MEM_ALIGN);
else
-   ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_flex_tx_desc),
+   ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_base_tx_desc),
  IDPF_DMA_MEM_ALIGN);
rte_memcpy(ring_name, "idpf Tx ring", sizeof("idpf Tx ring"));
break;
-- 
2.25.1



[PATCH v4 1/3] common/idpf: refactor single queue Tx data path

2023-09-13 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data descriptor,
which has been changed in the latest idpf spec. This patch replaces the flex
Tx data descriptor with the base Tx data descriptor in the single queue Tx
data path.

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
---
 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 3 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..e6d2486272 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1343,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
+   uint32_t td_offset;
uint64_t ol_flags;
uint16_t tx_last;
uint16_t nb_used;
@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
+   td_offset = 0;
 
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
-   txd->qw1.buf_size = slen;
-   txd->qw1.cmd_dtype = 
rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_FLEX_DATA <<
- 
IDPF_FLEX_TXD_QW1_DTYPE_S);
+   txd->qw1 = rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DATA |
+  

[PATCH v4 3/3] net/cpfl: refine Tx queue setup

2023-09-13 Thread Simei Su
This patch refines Tx single queue setup to align with Tx data path.

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
---
 drivers/net/cpfl/cpfl_rxtx.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 2ef6871a85..ab8bec4645 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -135,7 +135,7 @@ cpfl_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t 
queue_idx,
ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_flex_tx_sched_desc),
  CPFL_DMA_MEM_ALIGN);
else
-   ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_flex_tx_desc),
+   ring_size = RTE_ALIGN(len * sizeof(struct 
idpf_base_tx_desc),
  CPFL_DMA_MEM_ALIGN);
memcpy(ring_name, "cpfl Tx ring", sizeof("cpfl Tx ring"));
break;
-- 
2.25.1



[PATCH v5] common/idpf: refactor single queue Tx data path

2023-09-13 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data
descriptor (DTYPE3), which is removed in the latest idpf spec.
This patch replaces the flex Tx data descriptor with the base Tx data
descriptor for the single queue Tx data path and refines the Tx single
queue setup to align with the Tx data path.
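
For illustration, here is a minimal, self-contained sketch of how the
64-bit qw1 word of a base Tx data descriptor can be composed. The
SKETCH_* shift values are assumptions made for this example only; the
driver takes the real layout from idpf_lan_txrx.h.

#include <stdint.h>

/* Assumed qw1 bit layout: DTYPE | CMD | OFFSET | BUF_SIZE. */
#define SKETCH_TXD_QW1_DTYPE_S      0
#define SKETCH_TXD_QW1_CMD_S        4
#define SKETCH_TXD_QW1_OFFSET_S     16
#define SKETCH_TXD_QW1_TX_BUF_SZ_S  34

/* Build the qw1 word for one data buffer of 'size' bytes. */
static inline uint64_t
sketch_build_qw1(uint64_t dtype, uint64_t cmd, uint64_t offset, uint64_t size)
{
    return (dtype  << SKETCH_TXD_QW1_DTYPE_S) |
           (cmd    << SKETCH_TXD_QW1_CMD_S) |
           (offset << SKETCH_TXD_QW1_OFFSET_S) |
           (size   << SKETCH_TXD_QW1_TX_BUF_SZ_S);
}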

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 drivers/net/cpfl/cpfl_rxtx.c  |  2 +-
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 5 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..e6d2486272 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1343,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
+   uint32_t td_offset;
uint64_t ol_flags;
uint16_t tx_last;
uint16_t nb_used;
@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
+   td_offset = 0;
 
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
-   txd->qw1.buf_size = slen;
-   txd->qw1.cmd_dtype = 
rte_cpu_to_le_16(IDPF_TX_DESC_D

[PATCH v3 00/17] update idpf base code

2023-09-14 Thread Simei Su
This patch set updates idpf base code.

v3:
* Fix coding style issue.
* Modify unexpected error in the update version patch.

v2:
* Add two patches for share code update.
* Add version update.
* Fix coding style issue.

Simei Su (17):
  common/idpf/base: enable support for physical port stats
  common/idpf/base: add miss completion capabilities
  common/idpf/base: initial PTP support
  common/idpf/base: remove mailbox registers
  common/idpf/base: add some adi specific fields
  common/idpf/base: add necessary check
  common/idpf/base: add union for SW cookie fields in ctlq msg
  common/idpf/base: define non-flexible size structure for ADI
  common/idpf/base: use local pointer before updating 'CQ out'
  common/idpf/base: use 'void' return type
  common/idpf/base: refactor descriptor 'ret val' stripping
  common/idpf/base: refine comments and alignment
  common/idpf/base: use GENMASK macro
  common/idpf/base: use 'type functionname(args)' style
  common/idpf/base: don't declare union with 'flex'
  common/idpf/base: remove unused Tx descriptor types
  common/idpf/base: update version

 .mailmap  |   7 +
 drivers/common/idpf/base/README   |   2 +-
 drivers/common/idpf/base/idpf_common.c|  10 +-
 drivers/common/idpf/base/idpf_controlq.c  |  64 ++--
 drivers/common/idpf/base/idpf_controlq_api.h  |  17 +-
 .../common/idpf/base/idpf_controlq_setup.c|   5 +-
 drivers/common/idpf/base/idpf_lan_pf_regs.h   |  33 +-
 drivers/common/idpf/base/idpf_lan_txrx.h  | 285 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h   |  41 ++-
 drivers/common/idpf/base/idpf_osdep.h |   7 +
 drivers/common/idpf/base/idpf_prototype.h |   2 +-
 drivers/common/idpf/base/siov_regs.h  |  13 +-
 drivers/common/idpf/base/virtchnl2.h  | 303 --
 13 files changed, 462 insertions(+), 327 deletions(-)

-- 
2.25.1



[PATCH v3 01/17] common/idpf/base: enable support for physical port stats

2023-09-14 Thread Simei Su
Add support to indicate physical port representor and query its statistics.

Signed-off-by: Zhenning Xiao 
Signed-off-by: Jayaprakash Shanmugam 
Signed-off-by: Simei Su 
---
 .mailmap |  2 +
 drivers/common/idpf/base/virtchnl2.h | 80 +++-
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 4dac53011b..3dfdd81797 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1639,3 +1639,5 @@ Ziye Yang 
 Zoltan Kiss  
 Zorik Machulsky 
 Zyta Szpak   
+Jayaprakash Shanmugam 
+Zhenning Xiao 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 594bc26b8c..cd47444835 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -97,6 +97,7 @@
 #defineVIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE537
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
+#defineVIRTCHNL2_OP_GET_PORT_STATS 540
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -582,6 +583,9 @@ struct virtchnl2_queue_reg_chunks {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
 
+/* VIRTCHNL2_VPORT_FLAGS */
+#define VIRTCHNL2_VPORT_UPLINK_PORTBIT(0)
+
 #define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS  6
 
 /* VIRTCHNL2_OP_CREATE_VPORT
@@ -620,7 +624,8 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
-   __le16 pad;
+   /* see VIRTCHNL2_VPORT_FLAGS definitions */
+   __le16 vport_flags;
/* see VIRTCHNL2_RX_DESC_IDS definitions */
__le64 rx_desc_ids;
/* see VIRTCHNL2_TX_DESC_IDS definitions */
@@ -1159,6 +1164,74 @@ struct virtchnl2_vport_stats {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
 
+/* physical port statistics */
+struct virtchnl2_phy_port_stats {
+   __le64 rx_bytes;
+   __le64 rx_unicast_pkts;
+   __le64 rx_multicast_pkts;
+   __le64 rx_broadcast_pkts;
+   __le64 rx_size_64_pkts;
+   __le64 rx_size_127_pkts;
+   __le64 rx_size_255_pkts;
+   __le64 rx_size_511_pkts;
+   __le64 rx_size_1023_pkts;
+   __le64 rx_size_1518_pkts;
+   __le64 rx_size_jumbo_pkts;
+   __le64 rx_xon_events;
+   __le64 rx_xoff_events;
+   __le64 rx_undersized_pkts;
+   __le64 rx_fragmented_pkts;
+   __le64 rx_oversized_pkts;
+   __le64 rx_jabber_pkts;
+   __le64 rx_csum_errors;
+   __le64 rx_length_errors;
+   __le64 rx_dropped_pkts;
+   __le64 rx_crc_errors;
+   /* Frames with length < 64 and a bad CRC */
+   __le64 rx_runt_errors;
+   __le64 rx_illegal_bytes;
+   __le64 rx_total_pkts;
+   u8 rx_reserved[128];
+
+   __le64 tx_bytes;
+   __le64 tx_unicast_pkts;
+   __le64 tx_multicast_pkts;
+   __le64 tx_broadcast_pkts;
+   __le64 tx_errors;
+   __le64 tx_timeout_events;
+   __le64 tx_size_64_pkts;
+   __le64 tx_size_127_pkts;
+   __le64 tx_size_255_pkts;
+   __le64 tx_size_511_pkts;
+   __le64 tx_size_1023_pkts;
+   __le64 tx_size_1518_pkts;
+   __le64 tx_size_jumbo_pkts;
+   __le64 tx_xon_events;
+   __le64 tx_xoff_events;
+   __le64 tx_dropped_link_down_pkts;
+   __le64 tx_total_pkts;
+   u8 tx_reserved[128];
+   __le64 mac_local_faults;
+   __le64 mac_remote_faults;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats);
+
+/* VIRTCHNL2_OP_GET_PORT_STATS
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_port_stats that
+ * includes both physical port as well as vport statistics.
+ */
+struct virtchnl2_port_stats {
+   __le32 vport_id;
+   u8 pad[4];
+
+   struct virtchnl2_phy_port_stats phy_port_stats;
+   struct virtchnl2_vport_stats virt_port_stats;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats);
+
 /* VIRTCHNL2_OP_EVENT
  * CP sends this message to inform the PF/VF driver of events that may affect
  * it. No direct response is expected from the driver, though it may generate
@@ -1384,6 +1457,8 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_ADD_QUEUE_GROUPS";
case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
+   case VIRTCHNL2_OP_GET_PORT_STATS:
+   return "VIRTCHNL2_OP_GET_PORT_STATS";
default:
return "Unsupported (update virtchnl2.h)";
}
@@ -1648,6 +1723,9 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct 
virtchnl2_version_info *ver, u3
case VIRTCHNL2_OP_GET_STATS:
valid_len = sizeof(struct virtchnl2_vport_stats);
break;
+   case VIRTCHNL2_OP_GET_PORT_STATS:
+   val

[PATCH v3 03/17] common/idpf/base: initial PTP support

2023-09-14 Thread Simei Su
Adding a few PTP capabilities to determine which PTP features are
enabled - legacy cross time, PTM, device clock control, PTP Tx
timestamp with direct register access, and PTP Tx timestamp using
virtchnl messages.

Creating structures and opcodes to support the features introduced by
these capabilities.
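
For illustration, a minimal sketch of building the capability bitmap a
PF/VF could request; the bit positions mirror the definitions added
below, but the SKETCH_* names and the chosen subset are assumptions for
this example.

#include <stdint.h>

#define SKETCH_PTP_CAP_LEGACY_CROSS_TIME     (1u << 0)
#define SKETCH_PTP_CAP_PTM                   (1u << 1)
#define SKETCH_PTP_CAP_DEVICE_CLOCK_CONTROL  (1u << 2)
#define SKETCH_PTP_CAP_TX_TSTAMPS_DIRECT     (1u << 3)
#define SKETCH_PTP_CAP_TX_TSTAMPS_VIRTCHNL   (1u << 4)

/* Request clock control plus direct-register Tx timestamps; the CP is
 * expected to clear any bit it does not support in its reply.
 */
static inline uint32_t sketch_ptp_caps_request(void)
{
    return SKETCH_PTP_CAP_DEVICE_CLOCK_CONTROL |
           SKETCH_PTP_CAP_TX_TSTAMPS_DIRECT;
}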

Signed-off-by: Milena Olech 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/virtchnl2.h | 145 +++
 1 file changed, 145 insertions(+)

diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index c49e4b943c..320430df6f 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -98,6 +98,9 @@
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
 #defineVIRTCHNL2_OP_GET_PORT_STATS 540
+   /* TimeSync opcodes */
+#defineVIRTCHNL2_OP_GET_PTP_CAPS   541
+#defineVIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES  542
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -1395,6 +1398,112 @@ struct virtchnl2_promisc_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
 
+/* VIRTCHNL2_PTP_CAPS
+ * PTP capabilities
+ */
+#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIMEBIT(0)
+#define VIRTCHNL2_PTP_CAP_PTM  BIT(1)
+#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL BIT(2)
+#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECTBIT(3)
+#defineVIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL   BIT(4)
+
+/* Legacy cross time registers offsets */
+struct virtchnl2_ptp_legacy_cross_time_reg {
+   __le32 shadow_time_0;
+   __le32 shadow_time_l;
+   __le32 shadow_time_h;
+   __le32 cmd_sync;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg);
+
+/* PTM cross time registers offsets */
+struct virtchnl2_ptp_ptm_cross_time_reg {
+   __le32 art_l;
+   __le32 art_h;
+   __le32 cmd_sync;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg);
+
+/* Registers needed to control the main clock */
+struct virtchnl2_ptp_device_clock_control {
+   __le32 cmd;
+   __le32 incval_l;
+   __le32 incval_h;
+   __le32 shadj_l;
+   __le32 shadj_h;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control);
+
+/* Structure that defines tx tstamp entry - index and register offset */
+struct virtchnl2_ptp_tx_tstamp_entry {
+   __le32 tx_latch_register_base;
+   __le32 tx_latch_register_offset;
+   u8 index;
+   u8 pad[7];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry);
+
+/* Structure that defines tx tstamp entries - total number of latches
+ * and the array of entries.
+ */
+struct virtchnl2_ptp_tx_tstamp {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp);
+
+/* VIRTCHNL2_OP_GET_PTP_CAPS
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ */
+struct virtchnl2_get_ptp_caps {
+   /* PTP capability bitmap */
+   /* see VIRTCHNL2_PTP_CAPS definitions */
+   __le32 ptp_caps;
+   u8 pad[4];
+
+   struct virtchnl2_ptp_legacy_cross_time_reg legacy_cross_time_reg;
+   struct virtchnl2_ptp_ptm_cross_time_reg ptm_cross_time_reg;
+   struct virtchnl2_ptp_device_clock_control device_clock_control;
+   struct virtchnl2_ptp_tx_tstamp tx_tstamp;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps);
+
+/* Structure that describes tx tstamp values, index and validity */
+struct virtchnl2_ptp_tx_tstamp_latch {
+   __le32 tstamp_h;
+   __le32 tstamp_l;
+   u8 index;
+   u8 valid;
+   u8 pad[6];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/* VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ */
+struct virtchnl2_ptp_tx_tstamp_latches {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp_latches);
 
 static inline const char *virtchnl2_op_str(__le32 v_opcode)
 {
@@ -1463,6 +1572,10 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
case VIRTCHNL2_OP_GET_PORT_STATS:
return "VIRTCHNL2_OP_GET_PORT_STATS";
+   case VIRTCHNL2_OP_GET_PTP_CAPS:
+   return "VIRTCHNL2_OP_GET_PTP_CAPS";
+   case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES:
+   return "VIR

[PATCH v3 02/17] common/idpf/base: add miss completion capabilities

2023-09-14 Thread Simei Su
Add a miss completion tag to the other capabilities list, to indicate support
for detecting a miss completion based on the upper bit of the completion tag.
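
For illustration, a minimal sketch of the check this capability enables,
assuming the reserved bit is the most significant bit of a 16-bit
completion tag (the tag width and bit position are assumptions here).

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MISS_COMPL_TAG_BIT  (1u << 15)  /* assumed reserved bit */

static inline bool sketch_is_miss_completion(uint16_t compl_tag)
{
    return (compl_tag & SKETCH_MISS_COMPL_TAG_BIT) != 0;
}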

Signed-off-by: Josh Hay 
Signed-off-by: Simei Su 
---
 .mailmap | 1 +
 drivers/common/idpf/base/virtchnl2.h | 4 
 2 files changed, 5 insertions(+)

diff --git a/.mailmap b/.mailmap
index 3dfdd81797..91d8cca78f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1641,3 +1641,4 @@ Zorik Machulsky 
 Zyta Szpak   
 Jayaprakash Shanmugam 
 Zhenning Xiao 
+Josh Hay 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index cd47444835..c49e4b943c 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -231,6 +231,10 @@
 #define VIRTCHNL2_CAP_RX_FLEX_DESC BIT(17)
 #define VIRTCHNL2_CAP_PTYPEBIT(18)
 #define VIRTCHNL2_CAP_LOOPBACK BIT(19)
+/* Enable miss completion types plus ability to detect a miss completion if a
+ * reserved bit is set in a standard completion's tag.
+ */
+#define VIRTCHNL2_CAP_MISS_COMPL_TAG   BIT(20)
 /* this must be the last capability */
 #define VIRTCHNL2_CAP_OEM  BIT(63)
 
-- 
2.25.1



[PATCH v3 04/17] common/idpf/base: remove mailbox registers

2023-09-14 Thread Simei Su
Removing mailbox register offsets, as the mapping to device register
offsets differs between CVL and MEV (they are swapped out); individual
drivers will define the offsets based on how the registers are hardware
addressed. However, the layout will still begin at the VDEV_MBX_START
offset.
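
For illustration, a minimal sketch of how an individual driver could now
define its own mailbox register offsets relative to VDEV_MBX_START; the
MY_* names and offsets below are hypothetical, not taken from any device
spec.

/* Hypothetical per-driver layout; only the base offset is shared. */
#define MY_VDEV_MBX_START   0x20000  /* assumed value of VDEV_MBX_START */
#define MY_VDEV_MBX_ATQBAL  (MY_VDEV_MBX_START + 0x0000)
#define MY_VDEV_MBX_ATQBAH  (MY_VDEV_MBX_START + 0x0004)
#define MY_VDEV_MBX_ARQBAL  (MY_VDEV_MBX_START + 0x0014)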

Signed-off-by: Madhu Chittim 
Signed-off-by: Simei Su 
---
 .mailmap |  1 +
 drivers/common/idpf/base/siov_regs.h | 13 ++---
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/.mailmap b/.mailmap
index 91d8cca78f..d8782cd67e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1642,3 +1642,4 @@ Zyta Szpak   

 Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
+Madhu Chittim 
diff --git a/drivers/common/idpf/base/siov_regs.h 
b/drivers/common/idpf/base/siov_regs.h
index fad329601a..7e1ae2e300 100644
--- a/drivers/common/idpf/base/siov_regs.h
+++ b/drivers/common/idpf/base/siov_regs.h
@@ -4,16 +4,6 @@
 #ifndef _SIOV_REGS_H_
 #define _SIOV_REGS_H_
 #define VDEV_MBX_START 0x2 /* Begin at 128KB */
-#define VDEV_MBX_ATQBAL(VDEV_MBX_START + 0x)
-#define VDEV_MBX_ATQBAH(VDEV_MBX_START + 0x0004)
-#define VDEV_MBX_ATQLEN(VDEV_MBX_START + 0x0008)
-#define VDEV_MBX_ATQH  (VDEV_MBX_START + 0x000C)
-#define VDEV_MBX_ATQT  (VDEV_MBX_START + 0x0010)
-#define VDEV_MBX_ARQBAL(VDEV_MBX_START + 0x0014)
-#define VDEV_MBX_ARQBAH(VDEV_MBX_START + 0x0018)
-#define VDEV_MBX_ARQLEN(VDEV_MBX_START + 0x001C)
-#define VDEV_MBX_ARQH  (VDEV_MBX_START + 0x0020)
-#define VDEV_MBX_ARQT  (VDEV_MBX_START + 0x0024)
 #define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */
 
 /* Begin at offset after 1MB (after 256 4k pages) */
@@ -43,5 +33,6 @@
 #define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 
0x08)
 #define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 
0x0C)
 
-/* Next offset to begin at 42MB (0x2A0) */
+#define SIOV_REG_BAR_SIZE   0x2A0
+/* Next offset to begin at 42MB + 4K (0x2A0 + 0x1000) */
 #endif /* _SIOV_REGS_H_ */
-- 
2.25.1



[PATCH v3 05/17] common/idpf/base: add some adi specific fields

2023-09-14 Thread Simei Su
a) Add maximum ADI count in capabilities message
b) Add PF side ADI index to create_adi message
c) Define another constant to indicate 'Function active' state of ADI

Signed-off-by: Shailendra Bhatnagar 
Signed-off-by: Simei Su 
---
 .mailmap | 1 +
 drivers/common/idpf/base/virtchnl2.h | 8 ++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index d8782cd67e..75d534c53d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1643,3 +1643,4 @@ Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
 Madhu Chittim 
+Shailendra Bhatnagar 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 320430df6f..7a099f5148 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -294,6 +294,7 @@
 /* These messages are only sent to PF from CP */
 #define VIRTCHNL2_EVENT_START_RESET_ADI2
 #define VIRTCHNL2_EVENT_FINISH_RESET_ADI   3
+#define VIRTCHNL2_EVENT_ADI_ACTIVE 4
 
 /* VIRTCHNL2_QUEUE_TYPE
  * Transmit and Receive queue types are valid in legacy as well as split queue
@@ -547,7 +548,8 @@ struct virtchnl2_get_capabilities {
u8 max_sg_bufs_per_tx_pkt;
 
u8 reserved1;
-   __le16 pad1;
+   /* upper bound of number of ADIs supported */
+   __le16 max_adis;
 
/* version of Control Plane that is running */
__le16 oem_cp_ver_major;
@@ -1076,10 +1078,12 @@ struct virtchnl2_create_adi {
__le16 mbx_id;
/* PF sends mailbox vector id to CP */
__le16 mbx_vec_id;
+   /* PF populates this ADI index */
+   __le16 adi_index;
/* CP populates ADI id */
__le16 adi_id;
u8 reserved[64];
-   u8 pad[6];
+   u8 pad[4];
/* CP populates queue chunks */
struct virtchnl2_queue_reg_chunks chunks;
/* PF sends vector chunks to CP */
-- 
2.25.1



[PATCH v3 06/17] common/idpf/base: add necessary check

2023-09-14 Thread Simei Su
Add the necessary checks for the payload and message buffer.

Signed-off-by: Julianx Grajkowski 
Signed-off-by: Simei Su 
---
 .mailmap   | 1 +
 drivers/common/idpf/base/idpf_common.c | 6 --
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index 75d534c53d..23aed53102 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1644,3 +1644,4 @@ Zhenning Xiao 
 Josh Hay 
 Madhu Chittim 
 Shailendra Bhatnagar 
+Julianx Grajkowski 
diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index fbf71416fd..9610916aa9 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -239,8 +239,10 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
e->desc.ret_val = msg.status;
e->desc.datalen = msg.data_len;
if (msg.data_len > 0) {
-   if (!msg.ctx.indirect.payload)
-   return -EINVAL;
+   if (!msg.ctx.indirect.payload || !msg.ctx.indirect.payload->va 
||
+   !e->msg_buf) {
+   return -EFAULT;
+   }
e->buf_len = msg.data_len;
msg_data_len = msg.data_len;
idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, 
msg_data_len,
-- 
2.25.1



[PATCH v3 07/17] common/idpf/base: add union for SW cookie fields in ctlq msg

2023-09-14 Thread Simei Su
Instead of using something like a byte offset, we can add a union to the
struct here to enable direct addressing.
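
For illustration, a minimal sketch of the difference, using a reduced
stand-in for the control queue message context; the field sizes here
are assumptions for the example only.

#include <stdint.h>

struct sketch_ctlq_ctx {
    union {
        uint8_t context[8];          /* raw byte view */
        struct {
            uint32_t rsvd;
            uint16_t data;
            uint16_t flags;
        } sw_cookie;                 /* direct field view */
    };
};

static inline void sketch_set_cookie(struct sketch_ctlq_ctx *ctx,
                                     uint16_t data, uint16_t flags)
{
    /* Instead of computing byte offsets into 'context', address the
     * cookie fields directly through the union member.
     */
    ctx->sw_cookie.data  = data;
    ctx->sw_cookie.flags = flags;
}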

Signed-off-by: Alan Brady 
Signed-off-by: Simei Su 
---
 .mailmap | 1 +
 drivers/common/idpf/base/idpf_controlq_api.h | 5 +
 2 files changed, 6 insertions(+)

diff --git a/.mailmap b/.mailmap
index 23aed53102..2fcadb4e4c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1645,3 +1645,4 @@ Josh Hay 
 Madhu Chittim 
 Shailendra Bhatnagar 
 Julianx Grajkowski 
+Alan Brady 
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index 3780304256..f4e7b53ac9 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -77,6 +77,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+   struct {
+   u32 rsvd;
+   u16 data;
+   u16 flags;
+   } sw_cookie;
} ctx;
 };
 
-- 
2.25.1



[PATCH v3 08/17] common/idpf/base: define non-flexible size structure for ADI

2023-09-14 Thread Simei Su
A customer has a requirement to use the legacy fixed-size, single-chunk
structure for ADI creation - one chunk for the queue and one chunk for the
vector. This is described in detail in customer case
https://issuetracker.google.com/issues/270157802.

On the other hand, an upstream code review patch has been posted with
flex-array definitions. To accommodate the old style, the single-chunk
structures are being renamed so that merging the upstream patches with the
current code does not impact the customer's existing workflows.

a) Define virtchnl2_non_flex_queue_reg_chunks with a single chunk in it.
b) Define virtchnl2_non_flex_vector_chunks with a single chunk in it.
c) Rename and modify virtchnl2_create_adi to use the above 2 new structs.
New structure is virtchnl2_non_flex_create_adi.
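
For illustration, the shape difference between the legacy fixed-size
layout kept here and a flex-array layout; the structs below are reduced,
hypothetical stand-ins, not the virtchnl2 definitions.

#include <stdint.h>

struct sketch_chunk { uint64_t start; uint64_t num; };

/* Legacy style: exactly one chunk, so sizeof() is fixed and the
 * structure can be embedded in a larger message at a known offset.
 */
struct sketch_non_flex_chunks {
    uint16_t num_chunks;             /* always 1 in this layout */
    uint8_t  reserved[6];
    struct sketch_chunk chunks[1];
};

/* Flex-array style: the trailing array is sized at runtime, so nothing
 * can follow the structure at a fixed offset.
 */
struct sketch_flex_chunks {
    uint16_t num_chunks;
    uint8_t  reserved[6];
    struct sketch_chunk chunks[];
};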

Signed-off-by: Shailendra Bhatnagar 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/virtchnl2.h | 66 ++--
 1 file changed, 43 insertions(+), 23 deletions(-)

diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 7a099f5148..a19bb193c9 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -89,8 +89,8 @@
 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW
 */
/* opcodes 529, 530, and 531 are reserved */
-#defineVIRTCHNL2_OP_CREATE_ADI 532
-#defineVIRTCHNL2_OP_DESTROY_ADI533
+#defineVIRTCHNL2_OP_NON_FLEX_CREATE_ADI532
+#defineVIRTCHNL2_OP_NON_FLEX_DESTROY_ADI   533
 #defineVIRTCHNL2_OP_LOOPBACK   534
 #defineVIRTCHNL2_OP_ADD_MAC_ADDR   535
 #defineVIRTCHNL2_OP_DEL_MAC_ADDR   536
@@ -1061,14 +1061,34 @@ struct virtchnl2_sriov_vfs_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
 
-/* VIRTCHNL2_OP_CREATE_ADI
+/* structure to specify single chunk of queue */
+/* 'chunks' is fixed size(not flexible) and will be deprecated at some point */
+struct virtchnl2_non_flex_queue_reg_chunks {
+   __le16 num_chunks;
+   u8 reserved[6];
+   struct virtchnl2_queue_reg_chunk chunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks);
+
+/* structure to specify single chunk of interrupt vector */
+/* 'vchunks' is fixed size(not flexible) and will be deprecated at some point 
*/
+struct virtchnl2_non_flex_vector_chunks {
+   __le16 num_vchunks;
+   u8 reserved[14];
+   struct virtchnl2_vector_chunk vchunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks);
+
+/* VIRTCHNL2_OP_NON_FLEX_CREATE_ADI
  * PF sends this message to CP to create ADI by filling in required
- * fields of virtchnl2_create_adi structure.
- * CP responds with the updated virtchnl2_create_adi structure containing the
- * necessary fields followed by chunks which in turn will have an array of
+ * fields of virtchnl2_non_flex_create_adi structure.
+ * CP responds with the updated virtchnl2_non_flex_create_adi structure 
containing
+ * the necessary fields followed by chunks which in turn will have an array of
  * num_chunks entries of virtchnl2_queue_chunk structures.
  */
-struct virtchnl2_create_adi {
+struct virtchnl2_non_flex_create_adi {
/* PF sends PASID to CP */
__le32 pasid;
/*
@@ -1085,24 +1105,24 @@ struct virtchnl2_create_adi {
u8 reserved[64];
u8 pad[4];
/* CP populates queue chunks */
-   struct virtchnl2_queue_reg_chunks chunks;
+   struct virtchnl2_non_flex_queue_reg_chunks chunks;
/* PF sends vector chunks to CP */
-   struct virtchnl2_vector_chunks vchunks;
+   struct virtchnl2_non_flex_vector_chunks vchunks;
 };
 
-VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_create_adi);
+VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_non_flex_create_adi);
 
-/* VIRTCHNL2_OP_DESTROY_ADI
+/* VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI
  * PF sends this message to CP to destroy ADI by filling
  * in the adi_id in virtchnl2_destropy_adi structure.
  * CP responds with the status of the requested operation.
  */
-struct virtchnl2_destroy_adi {
+struct virtchnl2_non_flex_destroy_adi {
__le16 adi_id;
u8 reserved[2];
 };
 
-VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_destroy_adi);
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi);
 
 /* Based on the descriptor type the PF supports, CP fills ptype_id_10 or
  * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
@@ -1566,10 +1586,10 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_EVENT";
case VIRTCHNL2_OP_RESET_VF:
return "VIRTCHNL2_OP_RESET_VF";
-   case VIRTCHNL2_OP_CREATE_ADI:
-   return "VIRTCHNL2_OP_CREATE_ADI";
-   case VIRTCHNL2_OP_DESTROY_ADI:
-   return "VIRTCHNL2_OP_DEST

[PATCH v3 09/17] common/idpf/base: use local pointer before updating 'CQ out'

2023-09-14 Thread Simei Su
Instead of updating the 'cq_out' double pointer directly, use a local
pointer and update it only when we return success.
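
For illustration, the general shape of the pattern with hypothetical
names (not the control queue code itself).

#include <stdlib.h>

struct sketch_q { int id; };

/* Publish the object through 'out' only after every step succeeded, so
 * a failure never leaves the caller's pointer half-initialized.
 */
static int sketch_q_add(int id, struct sketch_q **out)
{
    struct sketch_q *q = calloc(1, sizeof(*q));

    if (q == NULL)
        return -1;

    q->id = id;
    /* ... further setup that may fail would go here ... */

    *out = q;
    return 0;
}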

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_controlq.c | 43 +---
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index 6815153e1d..b84a1ea046 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -137,6 +137,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
  struct idpf_ctlq_create_info *qinfo,
  struct idpf_ctlq_info **cq_out)
 {
+   struct idpf_ctlq_info *cq;
bool is_rxq = false;
int status = 0;
 
@@ -145,26 +146,26 @@ int idpf_ctlq_add(struct idpf_hw *hw,
qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
 
-   *cq_out = (struct idpf_ctlq_info *)
-   idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
-   if (!(*cq_out))
+   cq = (struct idpf_ctlq_info *)
+idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+   if (!cq)
return -ENOMEM;
 
-   (*cq_out)->cq_type = qinfo->type;
-   (*cq_out)->q_id = qinfo->id;
-   (*cq_out)->buf_size = qinfo->buf_size;
-   (*cq_out)->ring_size = qinfo->len;
+   (cq)->cq_type = qinfo->type;
+   (cq)->q_id = qinfo->id;
+   (cq)->buf_size = qinfo->buf_size;
+   (cq)->ring_size = qinfo->len;
 
-   (*cq_out)->next_to_use = 0;
-   (*cq_out)->next_to_clean = 0;
-   (*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+   (cq)->next_to_use = 0;
+   (cq)->next_to_clean = 0;
+   (cq)->next_to_post = cq->ring_size - 1;
 
switch (qinfo->type) {
case IDPF_CTLQ_TYPE_MAILBOX_RX:
is_rxq = true;
/* fallthrough */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
-   status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
+   status = idpf_ctlq_alloc_ring_res(hw, cq);
break;
default:
status = -EINVAL;
@@ -175,33 +176,35 @@ int idpf_ctlq_add(struct idpf_hw *hw,
goto init_free_q;
 
if (is_rxq) {
-   idpf_ctlq_init_rxq_bufs(*cq_out);
+   idpf_ctlq_init_rxq_bufs(cq);
} else {
/* Allocate the array of msg pointers for TX queues */
-   (*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
+   cq->bi.tx_msg = (struct idpf_ctlq_msg **)
idpf_calloc(hw, qinfo->len,
sizeof(struct idpf_ctlq_msg *));
-   if (!(*cq_out)->bi.tx_msg) {
+   if (!cq->bi.tx_msg) {
status = -ENOMEM;
goto init_dealloc_q_mem;
}
}
 
-   idpf_ctlq_setup_regs(*cq_out, qinfo);
+   idpf_ctlq_setup_regs(cq, qinfo);
 
-   idpf_ctlq_init_regs(hw, *cq_out, is_rxq);
+   idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-   idpf_init_lock(&(*cq_out)->cq_lock);
+   idpf_init_lock(&(cq->cq_lock));
 
-   LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);
+   LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
 
+   *cq_out = cq;
return status;
 
 init_dealloc_q_mem:
/* free ring buffers and the ring itself */
-   idpf_ctlq_dealloc_ring_res(hw, *cq_out);
+   idpf_ctlq_dealloc_ring_res(hw, cq);
 init_free_q:
-   idpf_free(hw, *cq_out);
+   idpf_free(hw, cq);
+   cq = NULL;
 
return status;
 }
-- 
2.25.1



[PATCH v3 10/17] common/idpf/base: use 'void' return type

2023-09-14 Thread Simei Su
As idpf_ctlq_deinit always returns success, make its return type 'void'
instead of always returning success. This also changes the return type
of idpf_deinit_hw to 'void'.

Based on the upstream comments, explicit __le16 typecasting is not
necessary as CPU_TO_LE16 is already being used.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_common.c   | 4 ++--
 drivers/common/idpf/base/idpf_controlq.c | 7 ++-
 drivers/common/idpf/base/idpf_controlq_api.h | 2 +-
 drivers/common/idpf/base/idpf_prototype.h| 2 +-
 4 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index 9610916aa9..7181a7f14c 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -262,12 +262,12 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
  *  idpf_deinit_hw - shutdown routine
  *  @hw: pointer to the hardware structure
  */
-int idpf_deinit_hw(struct idpf_hw *hw)
+void idpf_deinit_hw(struct idpf_hw *hw)
 {
hw->asq = NULL;
hw->arq = NULL;
 
-   return idpf_ctlq_deinit(hw);
+   idpf_ctlq_deinit(hw);
 }
 
 /**
diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index b84a1ea046..7b12dfab18 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -75,7 +75,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
desc->flags =
CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
desc->opcode = 0;
-   desc->datalen = (__le16)CPU_TO_LE16(bi->size);
+   desc->datalen = CPU_TO_LE16(bi->size);
desc->ret_val = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
@@ -264,16 +264,13 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
  * idpf_ctlq_deinit - destroy all control queues
  * @hw: pointer to hw struct
  */
-int idpf_ctlq_deinit(struct idpf_hw *hw)
+void idpf_ctlq_deinit(struct idpf_hw *hw)
 {
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
-   int ret_code = 0;
 
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
 idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
-
-   return ret_code;
 }
 
 /**
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index f4e7b53ac9..78a54f6b4c 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -205,6 +205,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
struct idpf_dma_mem **buffs);
 
 /* Will destroy all q including the default mb */
-int idpf_ctlq_deinit(struct idpf_hw *hw);
+void idpf_ctlq_deinit(struct idpf_hw *hw);
 
 #endif /* _IDPF_CONTROLQ_API_H_ */
diff --git a/drivers/common/idpf/base/idpf_prototype.h 
b/drivers/common/idpf/base/idpf_prototype.h
index 988ff00506..e2f090a9e3 100644
--- a/drivers/common/idpf/base/idpf_prototype.h
+++ b/drivers/common/idpf/base/idpf_prototype.h
@@ -20,7 +20,7 @@
 #define APF
 
 int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);
-int idpf_deinit_hw(struct idpf_hw *hw);
+void idpf_deinit_hw(struct idpf_hw *hw);
 
 int idpf_clean_arq_element(struct idpf_hw *hw,
   struct idpf_arq_event_info *e,
-- 
2.25.1



[PATCH v3 11/17] common/idpf/base: refactor descriptor 'ret val' stripping

2023-09-14 Thread Simei Su
A conditional check is not necessary to strip the status bits from the
descriptor.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_controlq.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index 7b12dfab18..da5c930578 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -426,11 +426,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, 
u16 *clean_count,
if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
 
-   desc_err = LE16_TO_CPU(desc->ret_val);
-   if (desc_err) {
-   /* strip off FW internal code */
-   desc_err &= 0xff;
-   }
+   /* strip off FW internal code */
+   desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;
 
msg_status[i] = cq->bi.tx_msg[ntc];
if (!msg_status[i])
-- 
2.25.1



[PATCH v3 12/17] common/idpf/base: refine comments and alignment

2023-09-14 Thread Simei Su
Refine the macros and definitions by using 'tab' spaces and new
lines wherever necessary. Also refine the comment in
'idpf_ctlq_setup_regs' and remove the TODO comment in idpf_rss_hash
enum as it doesn't make any sense.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_controlq.c |  2 +-
 drivers/common/idpf/base/idpf_controlq_api.h | 10 +
 drivers/common/idpf/base/idpf_lan_pf_regs.h  |  7 +--
 drivers/common/idpf/base/idpf_lan_txrx.h | 47 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h  | 25 +++
 5 files changed, 46 insertions(+), 45 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index da5c930578..c24bfd23ef 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -13,7 +13,7 @@ static void
 idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
 struct idpf_ctlq_create_info *q_create_info)
 {
-   /* set head and tail registers in our local struct */
+   /* set control queue registers in our local struct */
cq->reg.head = q_create_info->reg.head;
cq->reg.tail = q_create_info->reg.tail;
cq->reg.len = q_create_info->reg.len;
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index 78a54f6b4c..38f5d2df3c 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -21,10 +21,7 @@ enum idpf_ctlq_type {
IDPF_CTLQ_TYPE_RDMA_COMPL   = 7
 };
 
-/*
- * Generic Control Queue Structures
- */
-
+/* Generic Control Queue Structures */
 struct idpf_ctlq_reg {
/* used for queue tracking */
u32 head;
@@ -157,10 +154,7 @@ enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_peer_drv   = 0x0804,
 };
 
-/*
- * API supported for control queue management
- */
-
+/* API supported for control queue management */
 /* Will init all required q including default mb.  "q_info" is an array of
  * create_info structs equal to the number of control queues to be created.
  */
diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h 
b/drivers/common/idpf/base/idpf_lan_pf_regs.h
index 8542620e01..e47afad6e9 100644
--- a/drivers/common/idpf/base/idpf_lan_pf_regs.h
+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h
@@ -80,10 +80,11 @@
 /* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is
  * spacing b/w itrn registers of the same vector.
  */
-#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
-   ((_reg_start) + (((_ITR)) * (_itrn_indx_spacing)))
+#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing)
\
+   ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
 /* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */
-#define PF_GLINT_ITR(_ITR, _INT) (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) 
* 0x1000))
+#define PF_GLINT_ITR(_ITR, _INT)   \
+   (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
 #define PF_GLINT_ITR_MAX_INDEX 2
 #define PF_GLINT_ITR_INTERVAL_S0
 #define PF_GLINT_ITR_INTERVAL_MIDPF_M(0xFFF, 
PF_GLINT_ITR_INTERVAL_S)
diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index 7b03693eb1..4951e266f0 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -8,9 +8,9 @@
 #include "idpf_osdep.h"
 
 enum idpf_rss_hash {
-   /* Values 0 - 28 are reserved for future use */
-   IDPF_HASH_INVALID   = 0,
-   IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
+   IDPF_HASH_INVALID   = 0,
+   /* Values 1 - 28 are reserved for future use */
+   IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
IDPF_HASH_NONF_IPV4_UDP,
IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
@@ -19,7 +19,7 @@ enum idpf_rss_hash {
IDPF_HASH_NONF_IPV4_OTHER,
IDPF_HASH_FRAG_IPV4,
/* Values 37-38 are reserved */
-   IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
+   IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
IDPF_HASH_NONF_IPV6_UDP,
IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
@@ -32,34 +32,31 @@ enum idpf_rss_hash {
IDPF_HASH_NONF_FCOE_RX,
IDPF_HASH_NONF_FCOE_OTHER,
/* Values 51-62 are reserved */
-   IDPF_HASH_L2_PAYLOAD= 63,
+   IDPF_HASH_L2_PAYLOAD= 63,
+
IDPF_HASH_MAX
 };
 
 /* Supported RSS offloads */
-#define IDPF_DEFAULT_RSS_HASH ( \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
-   BIT_ULL(IDPF_HASH_FRAG_IPV4) |

[PATCH v3 13/17] common/idpf/base: use GENMASK macro

2023-09-14 Thread Simei Su
Instead of using a custom defined macro for generating a mask,
use the standard GENMASK macro.
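
For illustration, one common way GENMASK is defined for 32-bit registers
and a check that it reproduces the masks previously spelled out by hand;
the definition actually added to idpf_osdep.h is not shown in this hunk,
so the one below is an assumption.

#include <assert.h>

#define SKETCH_GENMASK(h, l) \
    ((0xFFFFFFFFu << (l)) & (0xFFFFFFFFu >> (31 - (h))))

int main(void)
{
    assert(SKETCH_GENMASK(12, 0) == 0x1FFFu); /* PF_FW_ARQLEN_ARQLEN_M */
    assert(SKETCH_GENMASK(9, 0)  == 0x3FFu);  /* PF_FW_ATQLEN_ATQLEN_M */
    assert(SKETCH_GENMASK(4, 3)  == 0x18u);   /* ITR_INDX field mask   */
    return 0;
}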

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_lan_pf_regs.h |  26 ++---
 drivers/common/idpf/base/idpf_lan_txrx.h| 116 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h |  16 +--
 drivers/common/idpf/base/idpf_osdep.h   |   7 ++
 4 files changed, 80 insertions(+), 85 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h 
b/drivers/common/idpf/base/idpf_lan_pf_regs.h
index e47afad6e9..b9d82592c0 100644
--- a/drivers/common/idpf/base/idpf_lan_pf_regs.h
+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h
@@ -24,7 +24,7 @@
 #define PF_FW_ARQBAH   (PF_FW_BASE + 0x4)
 #define PF_FW_ARQLEN   (PF_FW_BASE + 0x8)
 #define PF_FW_ARQLEN_ARQLEN_S  0
-#define PF_FW_ARQLEN_ARQLEN_M  IDPF_M(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQLEN_M  GENMASK(12, 0)
 #define PF_FW_ARQLEN_ARQVFE_S  28
 #define PF_FW_ARQLEN_ARQVFE_M  BIT(PF_FW_ARQLEN_ARQVFE_S)
 #define PF_FW_ARQLEN_ARQOVFL_S 29
@@ -35,14 +35,14 @@
 #define PF_FW_ARQLEN_ARQENABLE_M   BIT(PF_FW_ARQLEN_ARQENABLE_S)
 #define PF_FW_ARQH (PF_FW_BASE + 0xC)
 #define PF_FW_ARQH_ARQH_S  0
-#define PF_FW_ARQH_ARQH_M  IDPF_M(0x1FFF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQH_ARQH_M  GENMASK(12, 0)
 #define PF_FW_ARQT (PF_FW_BASE + 0x10)
 
 #define PF_FW_ATQBAL   (PF_FW_BASE + 0x14)
 #define PF_FW_ATQBAH   (PF_FW_BASE + 0x18)
 #define PF_FW_ATQLEN   (PF_FW_BASE + 0x1C)
 #define PF_FW_ATQLEN_ATQLEN_S  0
-#define PF_FW_ATQLEN_ATQLEN_M  IDPF_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQLEN_M  GENMASK(9, 0)
 #define PF_FW_ATQLEN_ATQVFE_S  28
 #define PF_FW_ATQLEN_ATQVFE_M  BIT(PF_FW_ATQLEN_ATQVFE_S)
 #define PF_FW_ATQLEN_ATQOVFL_S 29
@@ -53,7 +53,7 @@
 #define PF_FW_ATQLEN_ATQENABLE_M   BIT(PF_FW_ATQLEN_ATQENABLE_S)
 #define PF_FW_ATQH (PF_FW_BASE + 0x20)
 #define PF_FW_ATQH_ATQH_S  0
-#define PF_FW_ATQH_ATQH_M  IDPF_M(0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQH_ATQH_M  GENMASK(9, 0)
 #define PF_FW_ATQT (PF_FW_BASE + 0x24)
 
 /* Interrupts */
@@ -66,7 +66,7 @@
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_S  2
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_M  BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
 #define PF_GLINT_DYN_CTL_ITR_INDX_S3
-#define PF_GLINT_DYN_CTL_ITR_INDX_MIDPF_M(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_MGENMASK(4, 3)
 #define PF_GLINT_DYN_CTL_INTERVAL_S5
 #define PF_GLINT_DYN_CTL_INTERVAL_MBIT(PF_GLINT_DYN_CTL_INTERVAL_S)
 #define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
@@ -87,13 +87,13 @@
(PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
 #define PF_GLINT_ITR_MAX_INDEX 2
 #define PF_GLINT_ITR_INTERVAL_S0
-#define PF_GLINT_ITR_INTERVAL_MIDPF_M(0xFFF, 
PF_GLINT_ITR_INTERVAL_S)
+#define PF_GLINT_ITR_INTERVAL_MGENMASK(11, 0)
 
 /* Timesync registers */
 #define PF_TIMESYNC_BASE   0x08404000
 #define PF_GLTSYN_CMD_SYNC (PF_TIMESYNC_BASE)
 #define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S  0
-#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  IDPF_M(0x3, 
PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  GENMASK(1, 0)
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S 2
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)
 #define PF_GLTSYN_SHTIME_0 (PF_TIMESYNC_BASE + 0x4)
@@ -105,23 +105,23 @@
 /* Generic registers */
 #define PF_INT_DIR_OICR_ENA0x08406000
 #define PF_INT_DIR_OICR_ENA_S  0
-#define PF_INT_DIR_OICR_ENA_M  IDPF_M(0x, PF_INT_DIR_OICR_ENA_S)
+#define PF_INT_DIR_OICR_ENA_M  GENMASK(31, 0)
 #define PF_INT_DIR_OICR0x08406004
 #define PF_INT_DIR_OICR_TSYN_EVNT  0
 #define PF_INT_DIR_OICR_PHY_TS_0   BIT(1)
 #define PF_INT_DIR_OICR_PHY_TS_1   BIT(2)
 #define PF_INT_DIR_OICR_CAUSE  0x08406008
 #define PF_INT_DIR_OICR_CAUSE_CAUSE_S  0
-#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  IDPF_M(0x, 
PF_INT_DIR_OICR_CAUSE_CAUSE_S)
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  GENMASK(31, 0)
 #define PF_INT_PBA_CLEAR   0x0840600C
 
 #define PF_FUNC_RID0x08406010
 #define PF_FUNC_RID_FUNCTION_NUMBER_S  0
-#define PF_FUNC_RID_FUNCTION_NUMBER_M  IDPF_M(0x7, 
PF_FUNC_RID_FUNCTION_NUMBER_S)
+#define PF_FUNC_RID_FUNCTION_NUMBER_M  GENMASK(2, 0)
 #define PF_FUNC_RID_DEVICE_NUMBER_S3
-#define PF_FUNC_RID_DEVICE_NUMBER_MIDPF_M(0x1F, 
PF_FUNC_RID_DEVICE_NUMBER_S)
+#define PF_FUNC_RID_DEVICE_NUMBER_MGENMASK(7, 3)
 #define PF_FUNC_RID_BUS_NUMBER_S   8
-#define PF_FUNC_RID_BUS_NUMBER_M

[PATCH v3 14/17] common/idpf/base: use 'type functionname(args)' style

2023-09-14 Thread Simei Su
Instead of splitting the function return type and function name across
multiple lines, use them on a single line.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_controlq.c   | 5 ++---
 drivers/common/idpf/base/idpf_controlq_setup.c | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index c24bfd23ef..07bbec91b9 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -9,9 +9,8 @@
  * @cq: pointer to the specific control queue
  * @q_create_info: structs containing info for each queue to be initialized
  */
-static void
-idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
-struct idpf_ctlq_create_info *q_create_info)
+static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+struct idpf_ctlq_create_info *q_create_info)
 {
/* set control queue registers in our local struct */
cq->reg.head = q_create_info->reg.head;
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c 
b/drivers/common/idpf/base/idpf_controlq_setup.c
index 0f1b52a7e9..21f43c74f5 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -11,9 +11,8 @@
  * @hw: pointer to hw struct
  * @cq: pointer to the specific Control queue
  */
-static int
-idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
- struct idpf_ctlq_info *cq)
+static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
+struct idpf_ctlq_info *cq)
 {
size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
 
-- 
2.25.1



[PATCH v3 15/17] common/idpf/base: don't declare union with 'flex'

2023-09-14 Thread Simei Su
In the idpf_flex_tx_desc structure, instead of naming the union 'flex',
use an unnamed union, as the union name is not really necessary there.
This reduces the level of indirection in the hot path.
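
For illustration, how the access path shortens with an unnamed union;
the struct below is a reduced stand-in, not the real descriptor.

#include <stdint.h>

struct sketch_desc {
    uint16_t cmd_dtype;
    union {                  /* unnamed: members join the parent scope */
        uint8_t raw[4];
        struct { uint16_t l2tag1; uint16_t l2tag2; } l2tags;
    };
    uint16_t buf_size;
};

static inline uint16_t sketch_get_l2tag1(const struct sketch_desc *d)
{
    /* With the named union this was d->flex.l2tags.l2tag1; the extra
     * 'flex' level is gone now.
     */
    return d->l2tags.l2tag1;
}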

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_lan_txrx.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index f213c49e47..1e19aeafac 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -226,11 +226,11 @@ enum idpf_tx_flex_desc_cmd_bits {
 struct idpf_flex_tx_desc {
__le64 buf_addr;/* Packet buffer address */
struct {
-   __le16 cmd_dtype;
 #define IDPF_FLEX_TXD_QW1_DTYPE_S  0
 #define IDPF_FLEX_TXD_QW1_DTYPE_M  GENMASK(4, 0)
 #define IDPF_FLEX_TXD_QW1_CMD_S5
 #define IDPF_FLEX_TXD_QW1_CMD_MGENMASK(15, 5)
+   __le16 cmd_dtype;
union {
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
u8 raw[4];
@@ -247,7 +247,7 @@ struct idpf_flex_tx_desc {
__le16 l2tag1;
__le16 l2tag2;
} l2tags;
-   } flex;
+   };
__le16 buf_size;
} qw1;
 };
-- 
2.25.1



[PATCH v3 17/17] common/idpf/base: update version

2023-09-14 Thread Simei Su
Update the base code version recorded in the README.

Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/idpf/base/README b/drivers/common/idpf/base/README
index 693049c057..ff26f736ec 100644
--- a/drivers/common/idpf/base/README
+++ b/drivers/common/idpf/base/README
@@ -6,7 +6,7 @@ Intel® IDPF driver
 ==
 
 This directory contains source code of BSD-3-Clause idpf driver of version
-2023.02.23 released by the team which develops basic drivers for Intel IPU.
+2023.07.25 released by the team which develops basic drivers for Intel IPU.
 The directory of base/ contains the original source package.
 This driver is valid for the product(s) listed below
 
-- 
2.25.1



[PATCH v3 16/17] common/idpf/base: remove unused Tx descriptor types

2023-09-14 Thread Simei Su
Remove the unused TX descriptor types and mark them as reserved.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
---
 drivers/common/idpf/base/idpf_lan_txrx.h | 132 ++-
 1 file changed, 10 insertions(+), 122 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index 1e19aeafac..5bc4271584 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -120,19 +120,19 @@ enum idpf_rss_hash {
 enum idpf_tx_desc_dtype_value {
IDPF_TX_DESC_DTYPE_DATA = 0,
IDPF_TX_DESC_DTYPE_CTX  = 1,
-   IDPF_TX_DESC_DTYPE_REINJECT_CTX = 2,
-   IDPF_TX_DESC_DTYPE_FLEX_DATA= 3,
-   IDPF_TX_DESC_DTYPE_FLEX_CTX = 4,
+   /* DTYPE 2 is reserved
+* DTYPE 3 is free for future use
+* DTYPE 4 is reserved
+*/
IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
-   IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 = 6,
+   /* DTYPE 6 is reserved */
IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2   = 7,
-   IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX  = 8,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX= 9,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX= 10,
-   IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX  = 11,
+   /* DTYPE 8, 9 are free for future use
+* DTYPE 10 is reserved
+* DTYPE 11 is free for future use
+*/
IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE   = 12,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX   = 13,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX   = 14,
+   /* DTYPE 13, 14 are free for future use */
/* DESC_DONE - HW has completed write-back of descriptor */
IDPF_TX_DESC_DTYPE_DESC_DONE= 15,
 };
@@ -232,16 +232,6 @@ struct idpf_flex_tx_desc {
 #define IDPF_FLEX_TXD_QW1_CMD_MGENMASK(15, 5)
__le16 cmd_dtype;
union {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
-   u8 raw[4];
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
-   struct {
-   __le16 l2tag1;
-   u8 flex;
-   u8 tsync;
-   } tsync;
-
/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
struct {
__le16 l2tag1;
@@ -297,16 +287,6 @@ struct idpf_flex_tx_tso_ctx_qw {
 };
 
 union idpf_flex_tx_ctx_desc {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag1;
-   u8 qw1_flex[4];
-   } qw1;
-   } gen;
-
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
@@ -315,98 +295,6 @@ union idpf_flex_tx_ctx_desc {
u8 flex[6];
} qw1;
} tso;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */
-   struct {
-   struct idpf_flex_tx_tso_ctx_qw qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex0;
-   u8 ptag;
-   u8 flex1[2];
-   } qw1;
-   } tso_l2tag2_ptag;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex[4];
-   } qw1;
-   } l2tag2;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */
-   struct {
-   struct {
-   __le32 sa_domain;
-#define IDPF_TXD_FLEX_CTX_SA_DOM_M 0x
-#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL   0x1
-   __le32 sa_idx;
-#define IDPF_TXD_FLEX_CTX_SAIDX_M  0x1F
-   } qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 txr2comp;
-#define IDPF_TXD_FLEX_CTX_TXR2COMP 0x1
-   __le16 miss_txq_comp_tag;
-   __le16 miss_txq_id;
-   } qw1;
-   } reinjection_pkt;
 };
 
-/* Host Split Context Descriptors */
-struct idpf_flex_tx_hs_ctx_desc {
-   union {
-   struct {
-   __le32 host_fnum_tlen;
-#define IDPF_TXD_FLEX_CTX_TLEN_S   0
-/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */
-#define IDPF_TXD_FLEX_CTX_FNUM_S   18
-#define

[PATCH v4 00/18] update idpf base code

2023-09-17 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data
descriptor (DTYPE3), which is removed in the latest idpf spec.
[PATCH v4 01/18] replaces the flex Tx data descriptor with the base Tx data
descriptor for the single queue Tx data path and refines the Tx single
queue setup to align with the Tx data path.

[PATCH v4 02/18]~[PATCH v4 18/18] update idpf base code based on
[PATCH v4 01/18].

v4:
* Put single queue Tx data path refactor patch in this patchset.

v3:
* Fix coding style issue.
* Modify unexpected error in the update version patch.

v2:
* Add two patches for share code update.
* Add version update.
* Fix coding style issue.

Simei Su (18):
  common/idpf: refactor single queue Tx data path
  common/idpf/base: enable support for physical port stats
  common/idpf/base: add miss completion capabilities
  common/idpf/base: initial PTP support
  common/idpf/base: remove mailbox registers
  common/idpf/base: add some adi specific fields
  common/idpf/base: add necessary check
  common/idpf/base: add union for SW cookie fields in ctlq msg
  common/idpf/base: define non-flexible size structure for ADI
  common/idpf/base: use local pointer before updating 'CQ out'
  common/idpf/base: use 'void' return type
  common/idpf/base: refactor descriptor 'ret val' stripping
  common/idpf/base: refine comments and alignment
  common/idpf/base: use GENMASK macro
  common/idpf/base: use 'type functionname(args)' style
  common/idpf/base: don't declare union with 'flex'
  common/idpf/base: remove unused Tx descriptor types
  common/idpf/base: update version

 .mailmap  |   7 +
 drivers/common/idpf/base/README   |   2 +-
 drivers/common/idpf/base/idpf_common.c|  10 +-
 drivers/common/idpf/base/idpf_controlq.c  |  64 ++--
 drivers/common/idpf/base/idpf_controlq_api.h  |  17 +-
 .../common/idpf/base/idpf_controlq_setup.c|   5 +-
 drivers/common/idpf/base/idpf_lan_pf_regs.h   |  33 +-
 drivers/common/idpf/base/idpf_lan_txrx.h  | 285 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h   |  41 ++-
 drivers/common/idpf/base/idpf_osdep.h |   7 +
 drivers/common/idpf/base/idpf_prototype.h |   2 +-
 drivers/common/idpf/base/siov_regs.h  |  13 +-
 drivers/common/idpf/base/virtchnl2.h  | 303 --
 drivers/common/idpf/idpf_common_rxtx.c|  39 ++-
 drivers/common/idpf/idpf_common_rxtx.h|   2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c |  37 ++-
 drivers/net/cpfl/cpfl_rxtx.c  |   2 +-
 drivers/net/idpf/idpf_rxtx.c  |   2 +-
 18 files changed, 502 insertions(+), 369 deletions(-)

-- 
2.25.1



[PATCH v4 01/18] common/idpf: refactor single queue Tx data path

2023-09-17 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data
descriptor (DTYPE3), which is removed in the latest idpf spec.
This patch replaces the flex Tx data descriptor with the base Tx data
descriptor for the single queue Tx data path and refines the Tx single
queue setup to align with the Tx data path.

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 drivers/net/cpfl/cpfl_rxtx.c  |  2 +-
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 5 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..e6d2486272 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1343,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
+   uint32_t td_offset;
uint64_t ol_flags;
uint16_t tx_last;
uint16_t nb_used;
@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
+   td_offset = 0;
 
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
-   txd->qw1.buf_size = slen;
-   txd->qw1.cmd_dtype = 
rte_cpu_to_le_16(IDPF_TX_DESC_D

[PATCH v4 02/18] common/idpf/base: enable support for physical port stats

2023-09-17 Thread Simei Su
Add support to indicate physical port representor and query its statistics.
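
A minimal sketch of how a driver could use the new opcode, assuming a
hypothetical mailbox helper example_send_mbx_msg() that sends a virtchnl2
message and copies the response back into the same buffer:

static int example_get_port_stats(u32 vport_id,
                                  struct virtchnl2_port_stats *stats)
{
        *stats = (struct virtchnl2_port_stats){ 0 };
        stats->vport_id = vport_id;     /* CPU_TO_LE32() in real code */

        /* CP fills phy_port_stats and virt_port_stats in the response */
        return example_send_mbx_msg(VIRTCHNL2_OP_GET_PORT_STATS,
                                    (u8 *)stats, sizeof(*stats));
}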

Signed-off-by: Zhenning Xiao 
Signed-off-by: Jayaprakash Shanmugam 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap |  2 +
 drivers/common/idpf/base/virtchnl2.h | 80 +++-
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 4dac53011b..3dfdd81797 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1639,3 +1639,5 @@ Ziye Yang 
 Zoltan Kiss  
 Zorik Machulsky 
 Zyta Szpak   
+Jayaprakash Shanmugam 
+Zhenning Xiao 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 594bc26b8c..cd47444835 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -97,6 +97,7 @@
 #defineVIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE537
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
+#defineVIRTCHNL2_OP_GET_PORT_STATS 540
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -582,6 +583,9 @@ struct virtchnl2_queue_reg_chunks {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
 
+/* VIRTCHNL2_VPORT_FLAGS */
+#define VIRTCHNL2_VPORT_UPLINK_PORTBIT(0)
+
 #define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS  6
 
 /* VIRTCHNL2_OP_CREATE_VPORT
@@ -620,7 +624,8 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
-   __le16 pad;
+   /* see VIRTCHNL2_VPORT_FLAGS definitions */
+   __le16 vport_flags;
/* see VIRTCHNL2_RX_DESC_IDS definitions */
__le64 rx_desc_ids;
/* see VIRTCHNL2_TX_DESC_IDS definitions */
@@ -1159,6 +1164,74 @@ struct virtchnl2_vport_stats {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
 
+/* physical port statistics */
+struct virtchnl2_phy_port_stats {
+   __le64 rx_bytes;
+   __le64 rx_unicast_pkts;
+   __le64 rx_multicast_pkts;
+   __le64 rx_broadcast_pkts;
+   __le64 rx_size_64_pkts;
+   __le64 rx_size_127_pkts;
+   __le64 rx_size_255_pkts;
+   __le64 rx_size_511_pkts;
+   __le64 rx_size_1023_pkts;
+   __le64 rx_size_1518_pkts;
+   __le64 rx_size_jumbo_pkts;
+   __le64 rx_xon_events;
+   __le64 rx_xoff_events;
+   __le64 rx_undersized_pkts;
+   __le64 rx_fragmented_pkts;
+   __le64 rx_oversized_pkts;
+   __le64 rx_jabber_pkts;
+   __le64 rx_csum_errors;
+   __le64 rx_length_errors;
+   __le64 rx_dropped_pkts;
+   __le64 rx_crc_errors;
+   /* Frames with length < 64 and a bad CRC */
+   __le64 rx_runt_errors;
+   __le64 rx_illegal_bytes;
+   __le64 rx_total_pkts;
+   u8 rx_reserved[128];
+
+   __le64 tx_bytes;
+   __le64 tx_unicast_pkts;
+   __le64 tx_multicast_pkts;
+   __le64 tx_broadcast_pkts;
+   __le64 tx_errors;
+   __le64 tx_timeout_events;
+   __le64 tx_size_64_pkts;
+   __le64 tx_size_127_pkts;
+   __le64 tx_size_255_pkts;
+   __le64 tx_size_511_pkts;
+   __le64 tx_size_1023_pkts;
+   __le64 tx_size_1518_pkts;
+   __le64 tx_size_jumbo_pkts;
+   __le64 tx_xon_events;
+   __le64 tx_xoff_events;
+   __le64 tx_dropped_link_down_pkts;
+   __le64 tx_total_pkts;
+   u8 tx_reserved[128];
+   __le64 mac_local_faults;
+   __le64 mac_remote_faults;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats);
+
+/* VIRTCHNL2_OP_GET_PORT_STATS
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_port_stats that
+ * includes both physical port as well as vport statistics.
+ */
+struct virtchnl2_port_stats {
+   __le32 vport_id;
+   u8 pad[4];
+
+   struct virtchnl2_phy_port_stats phy_port_stats;
+   struct virtchnl2_vport_stats virt_port_stats;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats);
+
 /* VIRTCHNL2_OP_EVENT
  * CP sends this message to inform the PF/VF driver of events that may affect
  * it. No direct response is expected from the driver, though it may generate
@@ -1384,6 +1457,8 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_ADD_QUEUE_GROUPS";
case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
+   case VIRTCHNL2_OP_GET_PORT_STATS:
+   return "VIRTCHNL2_OP_GET_PORT_STATS";
default:
return "Unsupported (update virtchnl2.h)";
}
@@ -1648,6 +1723,9 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct 
virtchnl2_version_info *ver, u3
case VIRTCHNL2_OP_GET_STATS:
valid_len = sizeof(struct virtchnl2_vport_stats);
break;
+   case VIRTCHNL2_OP_GET_PORT_STATS

[PATCH v4 03/18] common/idpf/base: add miss completion capabilities

2023-09-17 Thread Simei Su
Add the miss completion tag to the other-capabilities list to indicate
support for detecting a miss completion based on the upper bit of the
completion tag.
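
A hedged sketch of how a driver might act on the new capability; the width
of the completion tag, and therefore which bit counts as the "upper" bit,
is an assumption here:

#define EXAMPLE_MISS_COMPL_TAG_M   BIT(15)  /* assumed 16-bit completion tag */

static bool example_compl_tag_is_miss(u64 negotiated_caps, u16 compl_tag)
{
        if (!(negotiated_caps & VIRTCHNL2_CAP_MISS_COMPL_TAG))
                return false;

        return (compl_tag & EXAMPLE_MISS_COMPL_TAG_M) != 0;
}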

Signed-off-by: Josh Hay 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap | 1 +
 drivers/common/idpf/base/virtchnl2.h | 4 
 2 files changed, 5 insertions(+)

diff --git a/.mailmap b/.mailmap
index 3dfdd81797..91d8cca78f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1641,3 +1641,4 @@ Zorik Machulsky 
 Zyta Szpak   
 Jayaprakash Shanmugam 
 Zhenning Xiao 
+Josh Hay 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index cd47444835..c49e4b943c 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -231,6 +231,10 @@
 #define VIRTCHNL2_CAP_RX_FLEX_DESC BIT(17)
 #define VIRTCHNL2_CAP_PTYPEBIT(18)
 #define VIRTCHNL2_CAP_LOOPBACK BIT(19)
+/* Enable miss completion types plus ability to detect a miss completion if a
+ * reserved bit is set in a standared completion's tag.
+ */
+#define VIRTCHNL2_CAP_MISS_COMPL_TAG   BIT(20)
 /* this must be the last capability */
 #define VIRTCHNL2_CAP_OEM  BIT(63)
 
-- 
2.25.1



[PATCH v4 05/18] common/idpf/base: remove mailbox registers

2023-09-17 Thread Simei Su
Remove the mailbox register offsets, as the mapping to device register
offsets differs between CVL and MEV (they are swapped). Individual
drivers will define the offsets based on how the registers are
addressed in hardware; however, they will always begin at the
VDEV_MBX_START offset.
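
As a sketch of what this means for an individual driver; the offsets
below are hypothetical and device specific, only the VDEV_MBX_START
anchor is common:

#define EXAMPLE_DEV_MBX_ATQBAL   (VDEV_MBX_START + 0x0000)
#define EXAMPLE_DEV_MBX_ATQBAH   (VDEV_MBX_START + 0x0004)
#define EXAMPLE_DEV_MBX_ARQBAL   (VDEV_MBX_START + 0x0014)
#define EXAMPLE_DEV_MBX_ARQBAH   (VDEV_MBX_START + 0x0018)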

Signed-off-by: Madhu Chittim 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap |  1 +
 drivers/common/idpf/base/siov_regs.h | 13 ++---
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/.mailmap b/.mailmap
index 91d8cca78f..d8782cd67e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1642,3 +1642,4 @@ Zyta Szpak   

 Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
+Madhu Chittim 
diff --git a/drivers/common/idpf/base/siov_regs.h 
b/drivers/common/idpf/base/siov_regs.h
index fad329601a..7e1ae2e300 100644
--- a/drivers/common/idpf/base/siov_regs.h
+++ b/drivers/common/idpf/base/siov_regs.h
@@ -4,16 +4,6 @@
 #ifndef _SIOV_REGS_H_
 #define _SIOV_REGS_H_
 #define VDEV_MBX_START 0x2 /* Begin at 128KB */
-#define VDEV_MBX_ATQBAL(VDEV_MBX_START + 0x)
-#define VDEV_MBX_ATQBAH(VDEV_MBX_START + 0x0004)
-#define VDEV_MBX_ATQLEN(VDEV_MBX_START + 0x0008)
-#define VDEV_MBX_ATQH  (VDEV_MBX_START + 0x000C)
-#define VDEV_MBX_ATQT  (VDEV_MBX_START + 0x0010)
-#define VDEV_MBX_ARQBAL(VDEV_MBX_START + 0x0014)
-#define VDEV_MBX_ARQBAH(VDEV_MBX_START + 0x0018)
-#define VDEV_MBX_ARQLEN(VDEV_MBX_START + 0x001C)
-#define VDEV_MBX_ARQH  (VDEV_MBX_START + 0x0020)
-#define VDEV_MBX_ARQT  (VDEV_MBX_START + 0x0024)
 #define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */
 
 /* Begin at offset after 1MB (after 256 4k pages) */
@@ -43,5 +33,6 @@
 #define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 
0x08)
 #define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 
0x0C)
 
-/* Next offset to begin at 42MB (0x2A0) */
+#define SIOV_REG_BAR_SIZE   0x2A0
+/* Next offset to begin at 42MB + 4K (0x2A0 + 0x1000) */
 #endif /* _SIOV_REGS_H_ */
-- 
2.25.1



[PATCH v4 04/18] common/idpf/base: initial PTP support

2023-09-17 Thread Simei Su
Adding a few PTP capabilities to determine which PTP features are
enabled - legacy cross time, PTM, device clock control, PTP Tx
timestamp with direct register access, and PTP Tx timestamp using
virtchnl messages.

Creating structures and opcodes to support the features introduced by
these capabilities.
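
A minimal sketch of the negotiation flow, assuming a hypothetical mailbox
helper example_send_mbx_msg(); the opcode, capability bits and structure
are the ones added by this patch:

static int example_negotiate_ptp_caps(struct virtchnl2_get_ptp_caps *caps)
{
        /* Request the features the driver can use (CPU_TO_LE32() in real code) */
        caps->ptp_caps = VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT |
                         VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL;

        /* CP clears unsupported bits and fills the register offset blocks */
        return example_send_mbx_msg(VIRTCHNL2_OP_GET_PTP_CAPS,
                                    (u8 *)caps, sizeof(*caps));
}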

Signed-off-by: Milena Olech 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/virtchnl2.h | 145 +++
 1 file changed, 145 insertions(+)

diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index c49e4b943c..320430df6f 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -98,6 +98,9 @@
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
 #defineVIRTCHNL2_OP_GET_PORT_STATS 540
+   /* TimeSync opcodes */
+#defineVIRTCHNL2_OP_GET_PTP_CAPS   541
+#defineVIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES  542
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -1395,6 +1398,112 @@ struct virtchnl2_promisc_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
 
+/* VIRTCHNL2_PTP_CAPS
+ * PTP capabilities
+ */
+#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIMEBIT(0)
+#define VIRTCHNL2_PTP_CAP_PTM  BIT(1)
+#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL BIT(2)
+#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECTBIT(3)
+#defineVIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL   BIT(4)
+
+/* Legacy cross time registers offsets */
+struct virtchnl2_ptp_legacy_cross_time_reg {
+   __le32 shadow_time_0;
+   __le32 shadow_time_l;
+   __le32 shadow_time_h;
+   __le32 cmd_sync;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg);
+
+/* PTM cross time registers offsets */
+struct virtchnl2_ptp_ptm_cross_time_reg {
+   __le32 art_l;
+   __le32 art_h;
+   __le32 cmd_sync;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg);
+
+/* Registers needed to control the main clock */
+struct virtchnl2_ptp_device_clock_control {
+   __le32 cmd;
+   __le32 incval_l;
+   __le32 incval_h;
+   __le32 shadj_l;
+   __le32 shadj_h;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control);
+
+/* Structure that defines tx tstamp entry - index and register offset */
+struct virtchnl2_ptp_tx_tstamp_entry {
+   __le32 tx_latch_register_base;
+   __le32 tx_latch_register_offset;
+   u8 index;
+   u8 pad[7];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry);
+
+/* Structure that defines tx tstamp entries - total number of latches
+ * and the array of entries.
+ */
+struct virtchnl2_ptp_tx_tstamp {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp);
+
+/* VIRTCHNL2_OP_GET_PTP_CAPS
+ * PV/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ */
+struct virtchnl2_get_ptp_caps {
+   /* PTP capability bitmap */
+   /* see VIRTCHNL2_PTP_CAPS definitions */
+   __le32 ptp_caps;
+   u8 pad[4];
+
+   struct virtchnl2_ptp_legacy_cross_time_reg legacy_cross_time_reg;
+   struct virtchnl2_ptp_ptm_cross_time_reg ptm_cross_time_reg;
+   struct virtchnl2_ptp_device_clock_control device_clock_control;
+   struct virtchnl2_ptp_tx_tstamp tx_tstamp;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps);
+
+/* Structure that describes tx tstamp values, index and validity */
+struct virtchnl2_ptp_tx_tstamp_latch {
+   __le32 tstamp_h;
+   __le32 tstamp_l;
+   u8 index;
+   u8 valid;
+   u8 pad[6];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/* VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ */
+struct virtchnl2_ptp_tx_tstamp_latches {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp_latches);
 
 static inline const char *virtchnl2_op_str(__le32 v_opcode)
 {
@@ -1463,6 +1572,10 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
case VIRTCHNL2_OP_GET_PORT_STATS:
return "VIRTCHNL2_OP_GET_PORT_STATS";
+   case VIRTCHNL2_OP_GET_PTP_CAPS:
+   return "VIRTCHNL2_OP_GET_PTP_CAPS";
+   case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_L

[PATCH v4 06/18] common/idpf/base: add some adi specific fields

2023-09-17 Thread Simei Su
a) Add maximum ADI count in capabilities message
b) Add PF side ADI index to create_adi message
c) Define another constant to indicate 'Function active' state of ADI

Signed-off-by: Shailendra Bhatnagar 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap | 1 +
 drivers/common/idpf/base/virtchnl2.h | 8 ++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index d8782cd67e..75d534c53d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1643,3 +1643,4 @@ Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
 Madhu Chittim 
+Shailendra Bhatnagar 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 320430df6f..7a099f5148 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -294,6 +294,7 @@
 /* These messages are only sent to PF from CP */
 #define VIRTCHNL2_EVENT_START_RESET_ADI2
 #define VIRTCHNL2_EVENT_FINISH_RESET_ADI   3
+#define VIRTCHNL2_EVENT_ADI_ACTIVE 4
 
 /* VIRTCHNL2_QUEUE_TYPE
  * Transmit and Receive queue types are valid in legacy as well as split queue
@@ -547,7 +548,8 @@ struct virtchnl2_get_capabilities {
u8 max_sg_bufs_per_tx_pkt;
 
u8 reserved1;
-   __le16 pad1;
+   /* upper bound of number of ADIs supported */
+   __le16 max_adis;
 
/* version of Control Plane that is running */
__le16 oem_cp_ver_major;
@@ -1076,10 +1078,12 @@ struct virtchnl2_create_adi {
__le16 mbx_id;
/* PF sends mailbox vector id to CP */
__le16 mbx_vec_id;
+   /* PF populates this ADI index */
+   __le16 adi_index;
/* CP populates ADI id */
__le16 adi_id;
u8 reserved[64];
-   u8 pad[6];
+   u8 pad[4];
/* CP populates queue chunks */
struct virtchnl2_queue_reg_chunks chunks;
/* PF sends vector chunks to CP */
-- 
2.25.1



[PATCH v4 07/18] common/idpf/base: add necessary check

2023-09-17 Thread Simei Su
Add necessary check for payload and message buffer.

Signed-off-by: Julianx Grajkowski 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap   | 1 +
 drivers/common/idpf/base/idpf_common.c | 6 --
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/.mailmap b/.mailmap
index 75d534c53d..23aed53102 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1644,3 +1644,4 @@ Zhenning Xiao 
 Josh Hay 
 Madhu Chittim 
 Shailendra Bhatnagar 
+Julianx Grajkowski 
diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index fbf71416fd..9610916aa9 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -239,8 +239,10 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
e->desc.ret_val = msg.status;
e->desc.datalen = msg.data_len;
if (msg.data_len > 0) {
-   if (!msg.ctx.indirect.payload)
-   return -EINVAL;
+   if (!msg.ctx.indirect.payload || !msg.ctx.indirect.payload->va 
||
+   !e->msg_buf) {
+   return -EFAULT;
+   }
e->buf_len = msg.data_len;
msg_data_len = msg.data_len;
idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, 
msg_data_len,
-- 
2.25.1



[PATCH v4 08/18] common/idpf/base: add union for SW cookie fields in ctlq msg

2023-09-17 Thread Simei Su
Instead of using something like a byte offset, we can add a union to the
struct here to enable direct addressing.
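
A small sketch of the difference this makes for callers (values are
illustrative only):

static void example_set_sw_cookie(struct idpf_ctlq_msg *msg)
{
        /* Previously this meant writing bytes at a known offset inside
         * msg->ctx.context[]; now the fields are directly addressable.
         */
        msg->ctx.sw_cookie.data  = 0x1234;
        msg->ctx.sw_cookie.flags = 0x0001;
}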

Signed-off-by: Alan Brady 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap | 1 +
 drivers/common/idpf/base/idpf_controlq_api.h | 5 +
 2 files changed, 6 insertions(+)

diff --git a/.mailmap b/.mailmap
index 23aed53102..2fcadb4e4c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1645,3 +1645,4 @@ Josh Hay 
 Madhu Chittim 
 Shailendra Bhatnagar 
 Julianx Grajkowski 
+Alan Brady 
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index 3780304256..f4e7b53ac9 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -77,6 +77,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+   struct {
+   u32 rsvd;
+   u16 data;
+   u16 flags;
+   } sw_cookie;
} ctx;
 };
 
-- 
2.25.1



[PATCH v4 09/18] common/idpf/base: define non-flexible size structure for ADI

2023-09-17 Thread Simei Su
A customer has a requirement to use the legacy fixed-size, single chunk
structure for ADI creation - one chunk for the queue and one chunk for the
vector. This is described in detail in customer case
https://issuetracker.google.com/issues/270157802.

On the other hand, an upstream code review patch has been posted with
flex-array definitions. To accommodate the old style, the single chunk
structures are being renamed so that merging the upstream patches with the
current code does not impact the customer's existing workflows.

a) Define virtchnl2_non_flex_queue_reg_chunks with a single chunk in it.
b) Define virtchnl2_non_flex_vector_chunks with a single chunk in it.
c) Rename and modify virtchnl2_create_adi to use the above 2 new structs.
New structure is virtchnl2_non_flex_create_adi.

Signed-off-by: Shailendra Bhatnagar 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/virtchnl2.h | 66 ++--
 1 file changed, 43 insertions(+), 23 deletions(-)

diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 7a099f5148..a19bb193c9 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -89,8 +89,8 @@
 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW
 */
/* opcodes 529, 530, and 531 are reserved */
-#defineVIRTCHNL2_OP_CREATE_ADI 532
-#defineVIRTCHNL2_OP_DESTROY_ADI533
+#defineVIRTCHNL2_OP_NON_FLEX_CREATE_ADI532
+#defineVIRTCHNL2_OP_NON_FLEX_DESTROY_ADI   533
 #defineVIRTCHNL2_OP_LOOPBACK   534
 #defineVIRTCHNL2_OP_ADD_MAC_ADDR   535
 #defineVIRTCHNL2_OP_DEL_MAC_ADDR   536
@@ -1061,14 +1061,34 @@ struct virtchnl2_sriov_vfs_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
 
-/* VIRTCHNL2_OP_CREATE_ADI
+/* structure to specify single chunk of queue */
+/* 'chunks' is fixed size(not flexible) and will be deprecated at some point */
+struct virtchnl2_non_flex_queue_reg_chunks {
+   __le16 num_chunks;
+   u8 reserved[6];
+   struct virtchnl2_queue_reg_chunk chunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks);
+
+/* structure to specify single chunk of interrupt vector */
+/* 'vchunks' is fixed size(not flexible) and will be deprecated at some point 
*/
+struct virtchnl2_non_flex_vector_chunks {
+   __le16 num_vchunks;
+   u8 reserved[14];
+   struct virtchnl2_vector_chunk vchunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks);
+
+/* VIRTCHNL2_OP_NON_FLEX_CREATE_ADI
  * PF sends this message to CP to create ADI by filling in required
- * fields of virtchnl2_create_adi structure.
- * CP responds with the updated virtchnl2_create_adi structure containing the
- * necessary fields followed by chunks which in turn will have an array of
+ * fields of virtchnl2_non_flex_create_adi structure.
+ * CP responds with the updated virtchnl2_non_flex_create_adi structure 
containing
+ * the necessary fields followed by chunks which in turn will have an array of
  * num_chunks entries of virtchnl2_queue_chunk structures.
  */
-struct virtchnl2_create_adi {
+struct virtchnl2_non_flex_create_adi {
/* PF sends PASID to CP */
__le32 pasid;
/*
@@ -1085,24 +1105,24 @@ struct virtchnl2_create_adi {
u8 reserved[64];
u8 pad[4];
/* CP populates queue chunks */
-   struct virtchnl2_queue_reg_chunks chunks;
+   struct virtchnl2_non_flex_queue_reg_chunks chunks;
/* PF sends vector chunks to CP */
-   struct virtchnl2_vector_chunks vchunks;
+   struct virtchnl2_non_flex_vector_chunks vchunks;
 };
 
-VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_create_adi);
+VIRTCHNL2_CHECK_STRUCT_LEN(168, virtchnl2_non_flex_create_adi);
 
-/* VIRTCHNL2_OP_DESTROY_ADI
+/* VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI
  * PF sends this message to CP to destroy ADI by filling
  * in the adi_id in virtchnl2_destropy_adi structure.
  * CP responds with the status of the requested operation.
  */
-struct virtchnl2_destroy_adi {
+struct virtchnl2_non_flex_destroy_adi {
__le16 adi_id;
u8 reserved[2];
 };
 
-VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_destroy_adi);
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_non_flex_destroy_adi);
 
 /* Based on the descriptor type the PF supports, CP fills ptype_id_10 or
  * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
@@ -1566,10 +1586,10 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_EVENT";
case VIRTCHNL2_OP_RESET_VF:
return "VIRTCHNL2_OP_RESET_VF";
-   case VIRTCHNL2_OP_CREATE_ADI:
-   return "VIRTCHNL2_OP_CREATE_ADI";
-   case VIRTCHNL2_OP_DESTROY_ADI:
-

[PATCH v4 10/18] common/idpf/base: use local pointer before updating 'CQ out'

2023-09-17 Thread Simei Su
Instead of writing directly through the 'cq_out' double pointer, use a
local pointer and update 'cq_out' only when we return success.
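
The general pattern being applied, sketched outside the driver for clarity
(struct example_obj and its init function are placeholders):

#include <errno.h>
#include <stdlib.h>

struct example_obj { int id; };

static int example_obj_init(struct example_obj *obj) { obj->id = 1; return 0; }

static int example_obj_create(struct example_obj **out)
{
        struct example_obj *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return -ENOMEM;
        if (example_obj_init(obj)) {
                free(obj);
                return -EINVAL;
        }
        *out = obj;     /* caller's pointer is written only on success */
        return 0;
}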

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_controlq.c | 43 +---
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index 6815153e1d..b84a1ea046 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -137,6 +137,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
  struct idpf_ctlq_create_info *qinfo,
  struct idpf_ctlq_info **cq_out)
 {
+   struct idpf_ctlq_info *cq;
bool is_rxq = false;
int status = 0;
 
@@ -145,26 +146,26 @@ int idpf_ctlq_add(struct idpf_hw *hw,
qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
 
-   *cq_out = (struct idpf_ctlq_info *)
-   idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
-   if (!(*cq_out))
+   cq = (struct idpf_ctlq_info *)
+idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+   if (!cq)
return -ENOMEM;
 
-   (*cq_out)->cq_type = qinfo->type;
-   (*cq_out)->q_id = qinfo->id;
-   (*cq_out)->buf_size = qinfo->buf_size;
-   (*cq_out)->ring_size = qinfo->len;
+   (cq)->cq_type = qinfo->type;
+   (cq)->q_id = qinfo->id;
+   (cq)->buf_size = qinfo->buf_size;
+   (cq)->ring_size = qinfo->len;
 
-   (*cq_out)->next_to_use = 0;
-   (*cq_out)->next_to_clean = 0;
-   (*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+   (cq)->next_to_use = 0;
+   (cq)->next_to_clean = 0;
+   (cq)->next_to_post = cq->ring_size - 1;
 
switch (qinfo->type) {
case IDPF_CTLQ_TYPE_MAILBOX_RX:
is_rxq = true;
/* fallthrough */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
-   status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
+   status = idpf_ctlq_alloc_ring_res(hw, cq);
break;
default:
status = -EINVAL;
@@ -175,33 +176,35 @@ int idpf_ctlq_add(struct idpf_hw *hw,
goto init_free_q;
 
if (is_rxq) {
-   idpf_ctlq_init_rxq_bufs(*cq_out);
+   idpf_ctlq_init_rxq_bufs(cq);
} else {
/* Allocate the array of msg pointers for TX queues */
-   (*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
+   cq->bi.tx_msg = (struct idpf_ctlq_msg **)
idpf_calloc(hw, qinfo->len,
sizeof(struct idpf_ctlq_msg *));
-   if (!(*cq_out)->bi.tx_msg) {
+   if (!cq->bi.tx_msg) {
status = -ENOMEM;
goto init_dealloc_q_mem;
}
}
 
-   idpf_ctlq_setup_regs(*cq_out, qinfo);
+   idpf_ctlq_setup_regs(cq, qinfo);
 
-   idpf_ctlq_init_regs(hw, *cq_out, is_rxq);
+   idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-   idpf_init_lock(&(*cq_out)->cq_lock);
+   idpf_init_lock(&(cq->cq_lock));
 
-   LIST_INSERT_HEAD(&hw->cq_list_head, (*cq_out), cq_list);
+   LIST_INSERT_HEAD(&hw->cq_list_head, cq, cq_list);
 
+   *cq_out = cq;
return status;
 
 init_dealloc_q_mem:
/* free ring buffers and the ring itself */
-   idpf_ctlq_dealloc_ring_res(hw, *cq_out);
+   idpf_ctlq_dealloc_ring_res(hw, cq);
 init_free_q:
-   idpf_free(hw, *cq_out);
+   idpf_free(hw, cq);
+   cq = NULL;
 
return status;
 }
-- 
2.25.1



[PATCH v4 11/18] common/idpf/base: use 'void' return type

2023-09-17 Thread Simei Su
As idpf_ctlq_deinit always succeeds, make its return type 'void' instead
of an int that can only report success. This also changes the return type
of idpf_deinit_hw to 'void'.

Based on the upstream comments, explicit __le16 typecasting is not
necessary as CPU_TO_LE16 is already being used.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_common.c   | 4 ++--
 drivers/common/idpf/base/idpf_controlq.c | 7 ++-
 drivers/common/idpf/base/idpf_controlq_api.h | 2 +-
 drivers/common/idpf/base/idpf_prototype.h| 2 +-
 4 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index 9610916aa9..7181a7f14c 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -262,12 +262,12 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
  *  idpf_deinit_hw - shutdown routine
  *  @hw: pointer to the hardware structure
  */
-int idpf_deinit_hw(struct idpf_hw *hw)
+void idpf_deinit_hw(struct idpf_hw *hw)
 {
hw->asq = NULL;
hw->arq = NULL;
 
-   return idpf_ctlq_deinit(hw);
+   idpf_ctlq_deinit(hw);
 }
 
 /**
diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index b84a1ea046..7b12dfab18 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -75,7 +75,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
desc->flags =
CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
desc->opcode = 0;
-   desc->datalen = (__le16)CPU_TO_LE16(bi->size);
+   desc->datalen = CPU_TO_LE16(bi->size);
desc->ret_val = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
@@ -264,16 +264,13 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
  * idpf_ctlq_deinit - destroy all control queues
  * @hw: pointer to hw struct
  */
-int idpf_ctlq_deinit(struct idpf_hw *hw)
+void idpf_ctlq_deinit(struct idpf_hw *hw)
 {
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
-   int ret_code = 0;
 
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
 idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
-
-   return ret_code;
 }
 
 /**
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index f4e7b53ac9..78a54f6b4c 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -205,6 +205,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
struct idpf_dma_mem **buffs);
 
 /* Will destroy all q including the default mb */
-int idpf_ctlq_deinit(struct idpf_hw *hw);
+void idpf_ctlq_deinit(struct idpf_hw *hw);
 
 #endif /* _IDPF_CONTROLQ_API_H_ */
diff --git a/drivers/common/idpf/base/idpf_prototype.h 
b/drivers/common/idpf/base/idpf_prototype.h
index 988ff00506..e2f090a9e3 100644
--- a/drivers/common/idpf/base/idpf_prototype.h
+++ b/drivers/common/idpf/base/idpf_prototype.h
@@ -20,7 +20,7 @@
 #define APF
 
 int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);
-int idpf_deinit_hw(struct idpf_hw *hw);
+void idpf_deinit_hw(struct idpf_hw *hw);
 
 int idpf_clean_arq_element(struct idpf_hw *hw,
   struct idpf_arq_event_info *e,
-- 
2.25.1



[PATCH v4 12/18] common/idpf/base: refactor descriptor 'ret val' stripping

2023-09-17 Thread Simei Su
A conditional check is not necessary to strip and extract the status bits
from the descriptor; apply the mask unconditionally.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_controlq.c | 7 ++-
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index 7b12dfab18..da5c930578 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -426,11 +426,8 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, 
u16 *clean_count,
if (!force && !(LE16_TO_CPU(desc->flags) & IDPF_CTLQ_FLAG_DD))
break;
 
-   desc_err = LE16_TO_CPU(desc->ret_val);
-   if (desc_err) {
-   /* strip off FW internal code */
-   desc_err &= 0xff;
-   }
+   /* strip off FW internal code */
+   desc_err = LE16_TO_CPU(desc->ret_val) & 0xff;
 
msg_status[i] = cq->bi.tx_msg[ntc];
if (!msg_status[i])
-- 
2.25.1



[PATCH v4 13/18] common/idpf/base: refine comments and alignment

2023-09-17 Thread Simei Su
Refine the macros and definitions by using tab spaces and newlines
wherever necessary. Also refine the comment in 'idpf_ctlq_setup_regs'
and remove the TODO comment from the idpf_rss_hash enum, as it no
longer makes sense there.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_controlq.c |  2 +-
 drivers/common/idpf/base/idpf_controlq_api.h | 10 +
 drivers/common/idpf/base/idpf_lan_pf_regs.h  |  7 +--
 drivers/common/idpf/base/idpf_lan_txrx.h | 47 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h  | 25 +++
 5 files changed, 46 insertions(+), 45 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index da5c930578..c24bfd23ef 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -13,7 +13,7 @@ static void
 idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
 struct idpf_ctlq_create_info *q_create_info)
 {
-   /* set head and tail registers in our local struct */
+   /* set control queue registers in our local struct */
cq->reg.head = q_create_info->reg.head;
cq->reg.tail = q_create_info->reg.tail;
cq->reg.len = q_create_info->reg.len;
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index 78a54f6b4c..38f5d2df3c 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -21,10 +21,7 @@ enum idpf_ctlq_type {
IDPF_CTLQ_TYPE_RDMA_COMPL   = 7
 };
 
-/*
- * Generic Control Queue Structures
- */
-
+/* Generic Control Queue Structures */
 struct idpf_ctlq_reg {
/* used for queue tracking */
u32 head;
@@ -157,10 +154,7 @@ enum idpf_mbx_opc {
idpf_mbq_opc_send_msg_to_peer_drv   = 0x0804,
 };
 
-/*
- * API supported for control queue management
- */
-
+/* API supported for control queue management */
 /* Will init all required q including default mb.  "q_info" is an array of
  * create_info structs equal to the number of control queues to be created.
  */
diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h 
b/drivers/common/idpf/base/idpf_lan_pf_regs.h
index 8542620e01..e47afad6e9 100644
--- a/drivers/common/idpf/base/idpf_lan_pf_regs.h
+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h
@@ -80,10 +80,11 @@
 /* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is
  * spacing b/w itrn registers of the same vector.
  */
-#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
-   ((_reg_start) + (((_ITR)) * (_itrn_indx_spacing)))
+#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing)
\
+   ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
 /* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */
-#define PF_GLINT_ITR(_ITR, _INT) (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) 
* 0x1000))
+#define PF_GLINT_ITR(_ITR, _INT)   \
+   (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
 #define PF_GLINT_ITR_MAX_INDEX 2
 #define PF_GLINT_ITR_INTERVAL_S0
 #define PF_GLINT_ITR_INTERVAL_MIDPF_M(0xFFF, 
PF_GLINT_ITR_INTERVAL_S)
diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index 7b03693eb1..4951e266f0 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -8,9 +8,9 @@
 #include "idpf_osdep.h"
 
 enum idpf_rss_hash {
-   /* Values 0 - 28 are reserved for future use */
-   IDPF_HASH_INVALID   = 0,
-   IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
+   IDPF_HASH_INVALID   = 0,
+   /* Values 1 - 28 are reserved for future use */
+   IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
IDPF_HASH_NONF_IPV4_UDP,
IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
@@ -19,7 +19,7 @@ enum idpf_rss_hash {
IDPF_HASH_NONF_IPV4_OTHER,
IDPF_HASH_FRAG_IPV4,
/* Values 37-38 are reserved */
-   IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
+   IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
IDPF_HASH_NONF_IPV6_UDP,
IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
@@ -32,34 +32,31 @@ enum idpf_rss_hash {
IDPF_HASH_NONF_FCOE_RX,
IDPF_HASH_NONF_FCOE_OTHER,
/* Values 51-62 are reserved */
-   IDPF_HASH_L2_PAYLOAD= 63,
+   IDPF_HASH_L2_PAYLOAD= 63,
+
IDPF_HASH_MAX
 };
 
 /* Supported RSS offloads */
-#define IDPF_DEFAULT_RSS_HASH ( \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
-   BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
-   BIT_ULL(IDPF_HASH_FRAG_IPV4) |

[PATCH v4 14/18] common/idpf/base: use GENMASK macro

2023-09-17 Thread Simei Su
Instead of using a custom defined macro for generating a mask,
use the standard GENMASK macro.
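
For reference, a sketch of a typical GENMASK definition; the one actually
added to idpf_osdep.h may differ in detail. GENMASK(h, l) builds a mask
with bits h..l (inclusive) set:

#define EXAMPLE_BITS_PER_LONG   (8 * sizeof(unsigned long))
#define EXAMPLE_GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (EXAMPLE_BITS_PER_LONG - 1 - (h))))

/* EXAMPLE_GENMASK(12, 0) == 0x1FFF, the old IDPF_M(0x1FFF, 0);
 * EXAMPLE_GENMASK(4, 3)  == 0x18,   the old IDPF_M(0x3, 3).
 */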

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_lan_pf_regs.h |  26 ++---
 drivers/common/idpf/base/idpf_lan_txrx.h| 116 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h |  16 +--
 drivers/common/idpf/base/idpf_osdep.h   |   7 ++
 4 files changed, 80 insertions(+), 85 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h 
b/drivers/common/idpf/base/idpf_lan_pf_regs.h
index e47afad6e9..b9d82592c0 100644
--- a/drivers/common/idpf/base/idpf_lan_pf_regs.h
+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h
@@ -24,7 +24,7 @@
 #define PF_FW_ARQBAH   (PF_FW_BASE + 0x4)
 #define PF_FW_ARQLEN   (PF_FW_BASE + 0x8)
 #define PF_FW_ARQLEN_ARQLEN_S  0
-#define PF_FW_ARQLEN_ARQLEN_M  IDPF_M(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQLEN_M  GENMASK(12, 0)
 #define PF_FW_ARQLEN_ARQVFE_S  28
 #define PF_FW_ARQLEN_ARQVFE_M  BIT(PF_FW_ARQLEN_ARQVFE_S)
 #define PF_FW_ARQLEN_ARQOVFL_S 29
@@ -35,14 +35,14 @@
 #define PF_FW_ARQLEN_ARQENABLE_M   BIT(PF_FW_ARQLEN_ARQENABLE_S)
 #define PF_FW_ARQH (PF_FW_BASE + 0xC)
 #define PF_FW_ARQH_ARQH_S  0
-#define PF_FW_ARQH_ARQH_M  IDPF_M(0x1FFF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQH_ARQH_M  GENMASK(12, 0)
 #define PF_FW_ARQT (PF_FW_BASE + 0x10)
 
 #define PF_FW_ATQBAL   (PF_FW_BASE + 0x14)
 #define PF_FW_ATQBAH   (PF_FW_BASE + 0x18)
 #define PF_FW_ATQLEN   (PF_FW_BASE + 0x1C)
 #define PF_FW_ATQLEN_ATQLEN_S  0
-#define PF_FW_ATQLEN_ATQLEN_M  IDPF_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQLEN_M  GENMASK(9, 0)
 #define PF_FW_ATQLEN_ATQVFE_S  28
 #define PF_FW_ATQLEN_ATQVFE_M  BIT(PF_FW_ATQLEN_ATQVFE_S)
 #define PF_FW_ATQLEN_ATQOVFL_S 29
@@ -53,7 +53,7 @@
 #define PF_FW_ATQLEN_ATQENABLE_M   BIT(PF_FW_ATQLEN_ATQENABLE_S)
 #define PF_FW_ATQH (PF_FW_BASE + 0x20)
 #define PF_FW_ATQH_ATQH_S  0
-#define PF_FW_ATQH_ATQH_M  IDPF_M(0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQH_ATQH_M  GENMASK(9, 0)
 #define PF_FW_ATQT (PF_FW_BASE + 0x24)
 
 /* Interrupts */
@@ -66,7 +66,7 @@
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_S  2
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_M  BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
 #define PF_GLINT_DYN_CTL_ITR_INDX_S3
-#define PF_GLINT_DYN_CTL_ITR_INDX_MIDPF_M(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_MGENMASK(4, 3)
 #define PF_GLINT_DYN_CTL_INTERVAL_S5
 #define PF_GLINT_DYN_CTL_INTERVAL_MBIT(PF_GLINT_DYN_CTL_INTERVAL_S)
 #define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
@@ -87,13 +87,13 @@
(PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
 #define PF_GLINT_ITR_MAX_INDEX 2
 #define PF_GLINT_ITR_INTERVAL_S0
-#define PF_GLINT_ITR_INTERVAL_MIDPF_M(0xFFF, 
PF_GLINT_ITR_INTERVAL_S)
+#define PF_GLINT_ITR_INTERVAL_MGENMASK(11, 0)
 
 /* Timesync registers */
 #define PF_TIMESYNC_BASE   0x08404000
 #define PF_GLTSYN_CMD_SYNC (PF_TIMESYNC_BASE)
 #define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S  0
-#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  IDPF_M(0x3, 
PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  GENMASK(1, 0)
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S 2
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)
 #define PF_GLTSYN_SHTIME_0 (PF_TIMESYNC_BASE + 0x4)
@@ -105,23 +105,23 @@
 /* Generic registers */
 #define PF_INT_DIR_OICR_ENA0x08406000
 #define PF_INT_DIR_OICR_ENA_S  0
-#define PF_INT_DIR_OICR_ENA_M  IDPF_M(0x, PF_INT_DIR_OICR_ENA_S)
+#define PF_INT_DIR_OICR_ENA_M  GENMASK(31, 0)
 #define PF_INT_DIR_OICR0x08406004
 #define PF_INT_DIR_OICR_TSYN_EVNT  0
 #define PF_INT_DIR_OICR_PHY_TS_0   BIT(1)
 #define PF_INT_DIR_OICR_PHY_TS_1   BIT(2)
 #define PF_INT_DIR_OICR_CAUSE  0x08406008
 #define PF_INT_DIR_OICR_CAUSE_CAUSE_S  0
-#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  IDPF_M(0x, 
PF_INT_DIR_OICR_CAUSE_CAUSE_S)
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  GENMASK(31, 0)
 #define PF_INT_PBA_CLEAR   0x0840600C
 
 #define PF_FUNC_RID0x08406010
 #define PF_FUNC_RID_FUNCTION_NUMBER_S  0
-#define PF_FUNC_RID_FUNCTION_NUMBER_M  IDPF_M(0x7, 
PF_FUNC_RID_FUNCTION_NUMBER_S)
+#define PF_FUNC_RID_FUNCTION_NUMBER_M  GENMASK(2, 0)
 #define PF_FUNC_RID_DEVICE_NUMBER_S3
-#define PF_FUNC_RID_DEVICE_NUMBER_MIDPF_M(0x1F, 
PF_FUNC_RID_DEVICE_NUMBER_S)
+#define PF_FUNC_RID_DEVICE_NUMBER_MGENMASK(7, 3)
 #define PF_FUNC_RID_BUS_NUMBER_S   8
-#define

[PATCH v4 16/18] common/idpf/base: don't declare union with 'flex'

2023-09-17 Thread Simei Su
In the idpf_flex_tx_desc structure, instead of naming the union 'flex',
use an anonymous union, as the union name is not really necessary there.
This reduces the level of indirection in the hot path.
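
The effect on the access path, as a small sketch:

static void example_set_l2tag1(struct idpf_flex_tx_desc *desc, __le16 tag)
{
        /* before this patch: desc->qw1.flex.l2tags.l2tag1 = tag; */
        desc->qw1.l2tags.l2tag1 = tag;
}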

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_lan_txrx.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index f213c49e47..1e19aeafac 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -226,11 +226,11 @@ enum idpf_tx_flex_desc_cmd_bits {
 struct idpf_flex_tx_desc {
__le64 buf_addr;/* Packet buffer address */
struct {
-   __le16 cmd_dtype;
 #define IDPF_FLEX_TXD_QW1_DTYPE_S  0
 #define IDPF_FLEX_TXD_QW1_DTYPE_M  GENMASK(4, 0)
 #define IDPF_FLEX_TXD_QW1_CMD_S5
 #define IDPF_FLEX_TXD_QW1_CMD_MGENMASK(15, 5)
+   __le16 cmd_dtype;
union {
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
u8 raw[4];
@@ -247,7 +247,7 @@ struct idpf_flex_tx_desc {
__le16 l2tag1;
__le16 l2tag2;
} l2tags;
-   } flex;
+   };
__le16 buf_size;
} qw1;
 };
-- 
2.25.1



[PATCH v4 15/18] common/idpf/base: use 'type functionname(args)' style

2023-09-17 Thread Simei Su
Instead of splitting the function's return type and name across
multiple lines, put them on a single line.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_controlq.c   | 5 ++---
 drivers/common/idpf/base/idpf_controlq_setup.c | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index c24bfd23ef..07bbec91b9 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -9,9 +9,8 @@
  * @cq: pointer to the specific control queue
  * @q_create_info: structs containing info for each queue to be initialized
  */
-static void
-idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
-struct idpf_ctlq_create_info *q_create_info)
+static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+struct idpf_ctlq_create_info *q_create_info)
 {
/* set control queue registers in our local struct */
cq->reg.head = q_create_info->reg.head;
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c 
b/drivers/common/idpf/base/idpf_controlq_setup.c
index 0f1b52a7e9..21f43c74f5 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -11,9 +11,8 @@
  * @hw: pointer to hw struct
  * @cq: pointer to the specific Control queue
  */
-static int
-idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
- struct idpf_ctlq_info *cq)
+static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
+struct idpf_ctlq_info *cq)
 {
size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
 
-- 
2.25.1



[PATCH v4 17/18] common/idpf/base: remove unused Tx descriptor types

2023-09-17 Thread Simei Su
Remove the unused TX descriptor types and mark them as reserved.

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_lan_txrx.h | 132 ++-
 1 file changed, 10 insertions(+), 122 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index 1e19aeafac..5bc4271584 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -120,19 +120,19 @@ enum idpf_rss_hash {
 enum idpf_tx_desc_dtype_value {
IDPF_TX_DESC_DTYPE_DATA = 0,
IDPF_TX_DESC_DTYPE_CTX  = 1,
-   IDPF_TX_DESC_DTYPE_REINJECT_CTX = 2,
-   IDPF_TX_DESC_DTYPE_FLEX_DATA= 3,
-   IDPF_TX_DESC_DTYPE_FLEX_CTX = 4,
+   /* DTYPE 2 is reserved
+* DTYPE 3 is free for future use
+* DTYPE 4 is reserved
+*/
IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
-   IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 = 6,
+   /* DTYPE 6 is reserved */
IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2   = 7,
-   IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX  = 8,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX= 9,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX= 10,
-   IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX  = 11,
+   /* DTYPE 8, 9 are free for future use
+* DTYPE 10 is reserved
+* DTYPE 11 is free for future use
+*/
IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE   = 12,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX   = 13,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX   = 14,
+   /* DTYPE 13, 14 are free for future use */
/* DESC_DONE - HW has completed write-back of descriptor */
IDPF_TX_DESC_DTYPE_DESC_DONE= 15,
 };
@@ -232,16 +232,6 @@ struct idpf_flex_tx_desc {
 #define IDPF_FLEX_TXD_QW1_CMD_MGENMASK(15, 5)
__le16 cmd_dtype;
union {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
-   u8 raw[4];
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
-   struct {
-   __le16 l2tag1;
-   u8 flex;
-   u8 tsync;
-   } tsync;
-
/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
struct {
__le16 l2tag1;
@@ -297,16 +287,6 @@ struct idpf_flex_tx_tso_ctx_qw {
 };
 
 union idpf_flex_tx_ctx_desc {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag1;
-   u8 qw1_flex[4];
-   } qw1;
-   } gen;
-
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
@@ -315,98 +295,6 @@ union idpf_flex_tx_ctx_desc {
u8 flex[6];
} qw1;
} tso;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */
-   struct {
-   struct idpf_flex_tx_tso_ctx_qw qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex0;
-   u8 ptag;
-   u8 flex1[2];
-   } qw1;
-   } tso_l2tag2_ptag;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex[4];
-   } qw1;
-   } l2tag2;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */
-   struct {
-   struct {
-   __le32 sa_domain;
-#define IDPF_TXD_FLEX_CTX_SA_DOM_M 0x
-#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL   0x1
-   __le32 sa_idx;
-#define IDPF_TXD_FLEX_CTX_SAIDX_M  0x1F
-   } qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 txr2comp;
-#define IDPF_TXD_FLEX_CTX_TXR2COMP 0x1
-   __le16 miss_txq_comp_tag;
-   __le16 miss_txq_id;
-   } qw1;
-   } reinjection_pkt;
 };
 
-/* Host Split Context Descriptors */
-struct idpf_flex_tx_hs_ctx_desc {
-   union {
-   struct {
-   __le32 host_fnum_tlen;
-#define IDPF_TXD_FLEX_CTX_TLEN_S   0
-/* see IDPF_TXD_FLEX_CTX_TLEN_M for mask definition */
-#define IDPF_TXD_FLEX_CTX_FNUM_S   18
-#define

[PATCH v4 18/18] common/idpf/base: update version

2023-09-17 Thread Simei Su
Update README

Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/idpf/base/README b/drivers/common/idpf/base/README
index 693049c057..ff26f736ec 100644
--- a/drivers/common/idpf/base/README
+++ b/drivers/common/idpf/base/README
@@ -6,7 +6,7 @@ Intel® IDPF driver
 ==
 
 This directory contains source code of BSD-3-Clause idpf driver of version
-2023.02.23 released by the team which develops basic drivers for Intel IPU.
+2023.07.25 released by the team which develops basic drivers for Intel IPU.
 The directory of base/ contains the original source package.
 This driver is valid for the product(s) listed below
 
-- 
2.25.1



[PATCH v5 00/11] update idpf base code

2023-09-19 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data
descriptor (DTYPE3), which has been removed from the latest idpf spec.
[PATCH v5 01/11] replaces the flex Tx data descriptor with the base Tx
data descriptor on the single queue Tx data path and refines the
single queue Tx setup to align with that data path.

[PATCH v5 02/11]~[PATCH v5 11/11] update idpf base code based on
[PATCH v5 01/11].

v5:
* Refine commit title and commit log.
* Combine several patches into one patch.

v4:
* Put single queue Tx data path refactor patch in this patchset.

v3:
* Fix coding style issue.
* Modify unexpected error in the update version patch.

v2:
* Add two patches for share code update.
* Add version update.
* Fix coding style issue.

Simei Su (11):
  common/idpf: refactor single queue Tx data path
  common/idpf/base: enable support for physical port stats
  common/idpf/base: add miss completion capabilities
  common/idpf/base: initialize PTP support
  common/idpf/base: remove mailbox registers
  common/idpf/base: refine structure and necessary check
  common/idpf/base: add union for SW cookie fields
  common/idpf/base: refine code and alignments
  common/idpf/base: use GENMASK macro
  common/idpf/base: remove unused Tx descriptor types
  common/idpf/base: update version

 .mailmap  |   7 +
 drivers/common/idpf/base/README   |   2 +-
 drivers/common/idpf/base/idpf_common.c|  10 +-
 drivers/common/idpf/base/idpf_controlq.c  |  64 ++--
 drivers/common/idpf/base/idpf_controlq_api.h  |  17 +-
 .../common/idpf/base/idpf_controlq_setup.c|   5 +-
 drivers/common/idpf/base/idpf_lan_pf_regs.h   |  33 +-
 drivers/common/idpf/base/idpf_lan_txrx.h  | 284 +
 drivers/common/idpf/base/idpf_lan_vf_regs.h   |  41 ++-
 drivers/common/idpf/base/idpf_osdep.h |   7 +
 drivers/common/idpf/base/idpf_prototype.h |   2 +-
 drivers/common/idpf/base/siov_regs.h  |  13 +-
 drivers/common/idpf/base/virtchnl2.h  | 301 --
 drivers/common/idpf/idpf_common_rxtx.c|  39 ++-
 drivers/common/idpf/idpf_common_rxtx.h|   2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c |  37 ++-
 drivers/net/cpfl/cpfl_rxtx.c  |   2 +-
 drivers/net/idpf/idpf_rxtx.c  |   2 +-
 18 files changed, 500 insertions(+), 368 deletions(-)

-- 
2.25.1



[PATCH v5 01/11] common/idpf: refactor single queue Tx data path

2023-09-19 Thread Simei Su
Currently, the single queue Tx data path uses the flex Tx data
descriptor (DTYPE3), which has been removed from the latest idpf spec.
This patch replaces the flex Tx data descriptor with the base Tx data
descriptor on the single queue Tx data path and refines the single
queue Tx setup to align with that data path.
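
A hedged sketch of how the single base-descriptor quad word can be
composed on the transmit side; the EX_* shift values are assumptions for
illustration, the authoritative IDPF_TXD_QW1_* defines live in
idpf_lan_txrx.h:

#define EX_TXD_QW1_CMD_S         4
#define EX_TXD_QW1_OFFSET_S      16
#define EX_TXD_QW1_TX_BUF_SZ_S   34

static __le64 example_build_base_qw1(uint64_t td_cmd, uint64_t td_offset,
                                     uint64_t size)
{
        return rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DATA |
                                (td_cmd    << EX_TXD_QW1_CMD_S)    |
                                (td_offset << EX_TXD_QW1_OFFSET_S) |
                                (size      << EX_TXD_QW1_TX_BUF_SZ_S));
}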

Signed-off-by: Simei Su 
Acked-by: Wenjun Wu 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_rxtx.c| 39 +--
 drivers/common/idpf/idpf_common_rxtx.h|  2 +-
 drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
 drivers/net/cpfl/cpfl_rxtx.c  |  2 +-
 drivers/net/idpf/idpf_rxtx.c  |  2 +-
 5 files changed, 40 insertions(+), 42 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..e6d2486272 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct idpf_tx_queue *txq)
}
 
txe = txq->sw_ring;
-   size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+   size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
for (i = 0; i < size; i++)
((volatile char *)txq->tx_ring)[i] = 0;
 
prev = (uint16_t)(txq->nb_tx_desc - 1);
for (i = 0; i < txq->nb_tx_desc; i++) {
-   txq->tx_ring[i].qw1.cmd_dtype =
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+   txq->tx_ring[i].qw1 =
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
txe[i].mbuf =  NULL;
txe[i].last_id = i;
txe[prev].next_id = i;
@@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
uint16_t nb_tx_to_clean;
uint16_t i;
 
-   volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
+   volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
 
desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
if (desc_to_clean_to >= nb_tx_desc)
desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
 
desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-   /* In the writeback Tx desccriptor, the only significant fields are the 
4-bit DTYPE */
-   if ((txd[desc_to_clean_to].qw1.cmd_dtype &
-rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
-   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
+   if ((txd[desc_to_clean_to].qw1 &
+rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
+   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
TX_LOG(DEBUG, "TX descriptor %4u is not done "
   "(port=%d queue=%d)", desc_to_clean_to,
   txq->port_id, txq->queue_id);
@@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
last_desc_cleaned);
 
-   txd[desc_to_clean_to].qw1.cmd_dtype = 0;
-   txd[desc_to_clean_to].qw1.buf_size = 0;
-   for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
-   txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
+   txd[desc_to_clean_to].qw1 = 0;
 
txq->last_desc_cleaned = desc_to_clean_to;
txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean);
@@ -1347,8 +1343,8 @@ uint16_t
 idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
 {
-   volatile struct idpf_flex_tx_desc *txd;
-   volatile struct idpf_flex_tx_desc *txr;
+   volatile struct idpf_base_tx_desc *txd;
+   volatile struct idpf_base_tx_desc *txr;
union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
struct idpf_tx_entry *sw_ring;
@@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
uint64_t buf_dma_addr;
+   uint32_t td_offset;
uint64_t ol_flags;
uint16_t tx_last;
uint16_t nb_used;
@@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
td_cmd = 0;
+   td_offset = 0;
 
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
slen = m_seg->data_len;
buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
-   txd->qw1.buf_size = slen;
-   txd->qw1.cmd_dtype = 
rte_cpu_to_le_16(IDPF_TX_DESC_D

[PATCH v5 02/11] common/idpf/base: enable support for physical port stats

2023-09-19 Thread Simei Su
Add support to indicate physical port representor and query its statistics.

Signed-off-by: Zhenning Xiao 
Signed-off-by: Jayaprakash Shanmugam 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap |  2 +
 drivers/common/idpf/base/virtchnl2.h | 80 +++-
 2 files changed, 81 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 4dac53011b..3dfdd81797 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1639,3 +1639,5 @@ Ziye Yang 
 Zoltan Kiss  
 Zorik Machulsky 
 Zyta Szpak   
+Jayaprakash Shanmugam 
+Zhenning Xiao 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 594bc26b8c..cd47444835 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -97,6 +97,7 @@
 #defineVIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE537
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
+#defineVIRTCHNL2_OP_GET_PORT_STATS 540
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -582,6 +583,9 @@ struct virtchnl2_queue_reg_chunks {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_queue_reg_chunks);
 
+/* VIRTCHNL2_VPORT_FLAGS */
+#define VIRTCHNL2_VPORT_UPLINK_PORTBIT(0)
+
 #define VIRTCHNL2_ETH_LENGTH_OF_ADDRESS  6
 
 /* VIRTCHNL2_OP_CREATE_VPORT
@@ -620,7 +624,8 @@ struct virtchnl2_create_vport {
__le16 max_mtu;
__le32 vport_id;
u8 default_mac_addr[VIRTCHNL2_ETH_LENGTH_OF_ADDRESS];
-   __le16 pad;
+   /* see VIRTCHNL2_VPORT_FLAGS definitions */
+   __le16 vport_flags;
/* see VIRTCHNL2_RX_DESC_IDS definitions */
__le64 rx_desc_ids;
/* see VIRTCHNL2_TX_DESC_IDS definitions */
@@ -1159,6 +1164,74 @@ struct virtchnl2_vport_stats {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
 
+/* physical port statistics */
+struct virtchnl2_phy_port_stats {
+   __le64 rx_bytes;
+   __le64 rx_unicast_pkts;
+   __le64 rx_multicast_pkts;
+   __le64 rx_broadcast_pkts;
+   __le64 rx_size_64_pkts;
+   __le64 rx_size_127_pkts;
+   __le64 rx_size_255_pkts;
+   __le64 rx_size_511_pkts;
+   __le64 rx_size_1023_pkts;
+   __le64 rx_size_1518_pkts;
+   __le64 rx_size_jumbo_pkts;
+   __le64 rx_xon_events;
+   __le64 rx_xoff_events;
+   __le64 rx_undersized_pkts;
+   __le64 rx_fragmented_pkts;
+   __le64 rx_oversized_pkts;
+   __le64 rx_jabber_pkts;
+   __le64 rx_csum_errors;
+   __le64 rx_length_errors;
+   __le64 rx_dropped_pkts;
+   __le64 rx_crc_errors;
+   /* Frames with length < 64 and a bad CRC */
+   __le64 rx_runt_errors;
+   __le64 rx_illegal_bytes;
+   __le64 rx_total_pkts;
+   u8 rx_reserved[128];
+
+   __le64 tx_bytes;
+   __le64 tx_unicast_pkts;
+   __le64 tx_multicast_pkts;
+   __le64 tx_broadcast_pkts;
+   __le64 tx_errors;
+   __le64 tx_timeout_events;
+   __le64 tx_size_64_pkts;
+   __le64 tx_size_127_pkts;
+   __le64 tx_size_255_pkts;
+   __le64 tx_size_511_pkts;
+   __le64 tx_size_1023_pkts;
+   __le64 tx_size_1518_pkts;
+   __le64 tx_size_jumbo_pkts;
+   __le64 tx_xon_events;
+   __le64 tx_xoff_events;
+   __le64 tx_dropped_link_down_pkts;
+   __le64 tx_total_pkts;
+   u8 tx_reserved[128];
+   __le64 mac_local_faults;
+   __le64 mac_remote_faults;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(600, virtchnl2_phy_port_stats);
+
+/* VIRTCHNL2_OP_GET_PORT_STATS
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_port_stats that
+ * includes both physical port as well as vport statistics.
+ */
+struct virtchnl2_port_stats {
+   __le32 vport_id;
+   u8 pad[4];
+
+   struct virtchnl2_phy_port_stats phy_port_stats;
+   struct virtchnl2_vport_stats virt_port_stats;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(736, virtchnl2_port_stats);
+
 /* VIRTCHNL2_OP_EVENT
  * CP sends this message to inform the PF/VF driver of events that may affect
  * it. No direct response is expected from the driver, though it may generate
@@ -1384,6 +1457,8 @@ static inline const char *virtchnl2_op_str(__le32 
v_opcode)
return "VIRTCHNL2_OP_ADD_QUEUE_GROUPS";
case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
+   case VIRTCHNL2_OP_GET_PORT_STATS:
+   return "VIRTCHNL2_OP_GET_PORT_STATS";
default:
return "Unsupported (update virtchnl2.h)";
}
@@ -1648,6 +1723,9 @@ virtchnl2_vc_validate_vf_msg(__rte_unused struct 
virtchnl2_version_info *ver, u3
case VIRTCHNL2_OP_GET_STATS:
valid_len = sizeof(struct virtchnl2_vport_stats);
break;
+   case VIRTCHNL2_OP_GET_PORT_STATS

[PATCH v5 03/11] common/idpf/base: add miss completion capabilities

2023-09-19 Thread Simei Su
Add a miss completion tag bit to the other-capability flags to indicate
support for detecting a miss completion based on the upper bit of the
completion tag.

Signed-off-by: Josh Hay 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap | 1 +
 drivers/common/idpf/base/virtchnl2.h | 4 
 2 files changed, 5 insertions(+)

diff --git a/.mailmap b/.mailmap
index 3dfdd81797..91d8cca78f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1641,3 +1641,4 @@ Zorik Machulsky 
 Zyta Szpak   
 Jayaprakash Shanmugam 
 Zhenning Xiao 
+Josh Hay 
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index cd47444835..c49e4b943c 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -231,6 +231,10 @@
 #define VIRTCHNL2_CAP_RX_FLEX_DESC BIT(17)
 #define VIRTCHNL2_CAP_PTYPEBIT(18)
 #define VIRTCHNL2_CAP_LOOPBACK BIT(19)
+/* Enable miss completion types plus ability to detect a miss completion if a
+ * reserved bit is set in a standard completion's tag.
+ */
+#define VIRTCHNL2_CAP_MISS_COMPL_TAG   BIT(20)
 /* this must be the last capability */
 #define VIRTCHNL2_CAP_OEM  BIT(63)
 
-- 
2.25.1



[PATCH v5 04/11] common/idpf/base: initialize PTP support

2023-09-19 Thread Simei Su
Add a few PTP capabilities to determine which PTP features are
enabled, including legacy cross time, PTM, device clock control,
PTP Tx timestamp with direct register access and PTP Tx timestamp
with virtchnl messages. Also create opcodes and structures to
support features introduced by these capabilities.
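
For illustration only, the negotiation could look roughly like the sketch
below: the PF/VF sets the bits it wants in ptp_caps, sends
VIRTCHNL2_OP_GET_PTP_CAPS (opcode 541) and then keys its feature setup off
whatever the CP leaves enabled. The reply handling is stubbed out; only the
capability bits come from the diff.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)					(1U << (n))
#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIME	BIT(0)
#define VIRTCHNL2_PTP_CAP_PTM			BIT(1)
#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL	BIT(2)
#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT	BIT(3)
#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL	BIT(4)

int main(void)
{
	/* Request everything the driver can use; the CP clears the bits it
	 * does not support in its reply and fills in the matching register
	 * and latch structures.
	 */
	uint32_t requested = VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL |
			     VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT |
			     VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL;

	/* ... send requested in virtchnl2_get_ptp_caps.ptp_caps, then: */
	uint32_t granted = requested;	/* stand-in for the CP reply */

	if (granted & VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECT)
		printf("Tx timestamps via direct register access\n");
	else if (granted & VIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL)
		printf("Tx timestamps via virtchnl messages\n");

	return 0;
}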

Signed-off-by: Milena Olech 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/virtchnl2.h | 145 +++
 1 file changed, 145 insertions(+)

diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index c49e4b943c..320430df6f 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -98,6 +98,9 @@
 #defineVIRTCHNL2_OP_ADD_QUEUE_GROUPS   538
 #defineVIRTCHNL2_OP_DEL_QUEUE_GROUPS   539
 #defineVIRTCHNL2_OP_GET_PORT_STATS 540
+   /* TimeSync opcodes */
+#defineVIRTCHNL2_OP_GET_PTP_CAPS   541
+#defineVIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES  542
 
 #define VIRTCHNL2_RDMA_INVALID_QUEUE_IDX   0x
 
@@ -1395,6 +1398,112 @@ struct virtchnl2_promisc_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
 
+/* VIRTCHNL2_PTP_CAPS
+ * PTP capabilities
+ */
+#define VIRTCHNL2_PTP_CAP_LEGACY_CROSS_TIMEBIT(0)
+#define VIRTCHNL2_PTP_CAP_PTM  BIT(1)
+#define VIRTCHNL2_PTP_CAP_DEVICE_CLOCK_CONTROL BIT(2)
+#define VIRTCHNL2_PTP_CAP_TX_TSTAMPS_DIRECTBIT(3)
+#defineVIRTCHNL2_PTP_CAP_TX_TSTAMPS_VIRTCHNL   BIT(4)
+
+/* Legacy cross time registers offsets */
+struct virtchnl2_ptp_legacy_cross_time_reg {
+   __le32 shadow_time_0;
+   __le32 shadow_time_l;
+   __le32 shadow_time_h;
+   __le32 cmd_sync;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_legacy_cross_time_reg);
+
+/* PTM cross time registers offsets */
+struct virtchnl2_ptp_ptm_cross_time_reg {
+   __le32 art_l;
+   __le32 art_h;
+   __le32 cmd_sync;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_ptm_cross_time_reg);
+
+/* Registers needed to control the main clock */
+struct virtchnl2_ptp_device_clock_control {
+   __le32 cmd;
+   __le32 incval_l;
+   __le32 incval_h;
+   __le32 shadj_l;
+   __le32 shadj_h;
+   u8 pad[4];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_device_clock_control);
+
+/* Structure that defines tx tstamp entry - index and register offset */
+struct virtchnl2_ptp_tx_tstamp_entry {
+   __le32 tx_latch_register_base;
+   __le32 tx_latch_register_offset;
+   u8 index;
+   u8 pad[7];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_entry);
+
+/* Structure that defines tx tstamp entries - total number of latches
+ * and the array of entries.
+ */
+struct virtchnl2_ptp_tx_tstamp {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_entry ptp_tx_tstamp_entries[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp);
+
+/* VIRTCHNL2_OP_GET_PTP_CAPS
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ */
+struct virtchnl2_get_ptp_caps {
+   /* PTP capability bitmap */
+   /* see VIRTCHNL2_PTP_CAPS definitions */
+   __le32 ptp_caps;
+   u8 pad[4];
+
+   struct virtchnl2_ptp_legacy_cross_time_reg legacy_cross_time_reg;
+   struct virtchnl2_ptp_ptm_cross_time_reg ptm_cross_time_reg;
+   struct virtchnl2_ptp_device_clock_control device_clock_control;
+   struct virtchnl2_ptp_tx_tstamp tx_tstamp;
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_get_ptp_caps);
+
+/* Structure that describes tx tstamp values, index and validity */
+struct virtchnl2_ptp_tx_tstamp_latch {
+   __le32 tstamp_h;
+   __le32 tstamp_l;
+   u8 index;
+   u8 valid;
+   u8 pad[6];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/* VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES
+ * PF/VF sends this message to receive a specified number of timestamps
+ * entries.
+ */
+struct virtchnl2_ptp_tx_tstamp_latches {
+   __le16 num_latches;
+   /* latch size expressed in bits */
+   __le16 latch_size;
+   u8 pad[4];
+   struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_tx_tstamp_latches);
 
 static inline const char *virtchnl2_op_str(__le32 v_opcode)
 {
@@ -1463,6 +1572,10 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode)
return "VIRTCHNL2_OP_DEL_QUEUE_GROUPS";
case VIRTCHNL2_OP_GET_PORT_STATS:
return "VIRTCHNL2_OP_GET_PORT_STATS";
+   case VIRTCHNL2_OP_GET_PTP_CAPS:
+   return "VIRTCHNL2_OP_GET_PTP_CAPS";
+   case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_L

[PATCH v5 05/11] common/idpf/base: remove mailbox registers

2023-09-19 Thread Simei Su
Remove mailbox register offsets because individual drivers will define
these offsets based on how they address the registers.
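
To make the intent concrete, a consuming driver can now carry its own
offsets, for example mirroring the removed defaults relative to
VDEV_MBX_START (128 KB, i.e. 0x20000); the MY_VDEV_* names below are purely
illustrative and no longer provided by this header.

/* Hypothetical driver-local header */
#define VDEV_MBX_START		0x20000	/* begin at 128KB, as in siov_regs.h */
#define MY_VDEV_MBX_ATQBAL	(VDEV_MBX_START + 0x0000)
#define MY_VDEV_MBX_ATQBAH	(VDEV_MBX_START + 0x0004)
#define MY_VDEV_MBX_ATQLEN	(VDEV_MBX_START + 0x0008)
#define MY_VDEV_MBX_ARQBAL	(VDEV_MBX_START + 0x0014)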

Signed-off-by: Madhu Chittim 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap |  1 +
 drivers/common/idpf/base/siov_regs.h | 13 ++---
 2 files changed, 3 insertions(+), 11 deletions(-)

diff --git a/.mailmap b/.mailmap
index 91d8cca78f..d8782cd67e 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1642,3 +1642,4 @@ Zyta Szpak   

 Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
+Madhu Chittim 
diff --git a/drivers/common/idpf/base/siov_regs.h 
b/drivers/common/idpf/base/siov_regs.h
index fad329601a..7e1ae2e300 100644
--- a/drivers/common/idpf/base/siov_regs.h
+++ b/drivers/common/idpf/base/siov_regs.h
@@ -4,16 +4,6 @@
 #ifndef _SIOV_REGS_H_
 #define _SIOV_REGS_H_
 #define VDEV_MBX_START 0x2 /* Begin at 128KB */
-#define VDEV_MBX_ATQBAL(VDEV_MBX_START + 0x)
-#define VDEV_MBX_ATQBAH(VDEV_MBX_START + 0x0004)
-#define VDEV_MBX_ATQLEN(VDEV_MBX_START + 0x0008)
-#define VDEV_MBX_ATQH  (VDEV_MBX_START + 0x000C)
-#define VDEV_MBX_ATQT  (VDEV_MBX_START + 0x0010)
-#define VDEV_MBX_ARQBAL(VDEV_MBX_START + 0x0014)
-#define VDEV_MBX_ARQBAH(VDEV_MBX_START + 0x0018)
-#define VDEV_MBX_ARQLEN(VDEV_MBX_START + 0x001C)
-#define VDEV_MBX_ARQH  (VDEV_MBX_START + 0x0020)
-#define VDEV_MBX_ARQT  (VDEV_MBX_START + 0x0024)
 #define VDEV_GET_RSTAT 0x21000 /* 132KB for RSTAT */
 
 /* Begin at offset after 1MB (after 256 4k pages) */
@@ -43,5 +33,6 @@
 #define VDEV_INT_ITR_1(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x08)
 #define VDEV_INT_ITR_2(_i) (VDEV_INT_DYN_START + ((_i) * 0x1000) + 0x0C)
 
-/* Next offset to begin at 42MB (0x2A0) */
+#define SIOV_REG_BAR_SIZE   0x2A0
+/* Next offset to begin at 42MB + 4K (0x2A0 + 0x1000) */
 #endif /* _SIOV_REGS_H_ */
-- 
2.25.1



[PATCH v5 06/11] common/idpf/base: refine structure and necessary check

2023-09-19 Thread Simei Su
a) Refine queue chunk and vector chunk structures.
b) Add non_flex prefix to distinguish the flex array definitions.
c) Add some specific fields such as max_adis and adi_index.
d) Refine condition check.
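
The non_flex structures added in (b) keep the old one-element trailing
array layout, so a sender that needs more than one entry still sizes the
message as in the sketch below (simplified stand-in types, num_chunks >= 1
assumed; the real structs use __le16/__le64 fields as shown in the diff).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct queue_reg_chunk {		/* stand-in, 32 bytes like the real chunk */
	uint64_t regs[4];
};

struct non_flex_queue_reg_chunks {	/* mirrors the one-element-array idiom */
	uint16_t num_chunks;
	uint8_t reserved[6];
	struct queue_reg_chunk chunks[1];
};

/* The struct already holds one chunk, so only the extra ones are added. */
static size_t chunks_msg_size(uint16_t num_chunks)
{
	return sizeof(struct non_flex_queue_reg_chunks) +
	       (size_t)(num_chunks - 1) * sizeof(struct queue_reg_chunk);
}

int main(void)
{
	printf("%zu bytes for 4 chunks\n", chunks_msg_size(4));
	return 0;
}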

Signed-off-by: Shailendra Bhatnagar 
Signed-off-by: Julianx Grajkowski 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap   |  2 +
 drivers/common/idpf/base/idpf_common.c |  6 ++-
 drivers/common/idpf/base/virtchnl2.h   | 72 +-
 3 files changed, 54 insertions(+), 26 deletions(-)

diff --git a/.mailmap b/.mailmap
index d8782cd67e..23aed53102 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1643,3 +1643,5 @@ Jayaprakash Shanmugam 
 Zhenning Xiao 
 Josh Hay 
 Madhu Chittim 
+Shailendra Bhatnagar 
+Julianx Grajkowski 
diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index fbf71416fd..9610916aa9 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -239,8 +239,10 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
e->desc.ret_val = msg.status;
e->desc.datalen = msg.data_len;
if (msg.data_len > 0) {
-   if (!msg.ctx.indirect.payload)
-   return -EINVAL;
+   if (!msg.ctx.indirect.payload || !msg.ctx.indirect.payload->va ||
+   !e->msg_buf) {
+   return -EFAULT;
+   }
e->buf_len = msg.data_len;
msg_data_len = msg.data_len;
   idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg_data_len,
diff --git a/drivers/common/idpf/base/virtchnl2.h 
b/drivers/common/idpf/base/virtchnl2.h
index 320430df6f..3900b784d0 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -89,8 +89,8 @@
 * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW
 */
/* opcodes 529, 530, and 531 are reserved */
-#defineVIRTCHNL2_OP_CREATE_ADI 532
-#defineVIRTCHNL2_OP_DESTROY_ADI533
+#defineVIRTCHNL2_OP_NON_FLEX_CREATE_ADI532
+#defineVIRTCHNL2_OP_NON_FLEX_DESTROY_ADI   533
 #defineVIRTCHNL2_OP_LOOPBACK   534
 #defineVIRTCHNL2_OP_ADD_MAC_ADDR   535
 #defineVIRTCHNL2_OP_DEL_MAC_ADDR   536
@@ -294,6 +294,7 @@
 /* These messages are only sent to PF from CP */
 #define VIRTCHNL2_EVENT_START_RESET_ADI2
 #define VIRTCHNL2_EVENT_FINISH_RESET_ADI   3
+#define VIRTCHNL2_EVENT_ADI_ACTIVE 4
 
 /* VIRTCHNL2_QUEUE_TYPE
  * Transmit and Receive queue types are valid in legacy as well as split queue
@@ -547,7 +548,8 @@ struct virtchnl2_get_capabilities {
u8 max_sg_bufs_per_tx_pkt;
 
u8 reserved1;
-   __le16 pad1;
+   /* upper bound of number of ADIs supported */
+   __le16 max_adis;
 
/* version of Control Plane that is running */
__le16 oem_cp_ver_major;
@@ -1059,14 +1061,34 @@ struct virtchnl2_sriov_vfs_info {
 
 VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
 
-/* VIRTCHNL2_OP_CREATE_ADI
+/* structure to specify single chunk of queue */
+/* 'chunks' is fixed size (not flexible) and will be deprecated at some point */
+struct virtchnl2_non_flex_queue_reg_chunks {
+   __le16 num_chunks;
+   u8 reserved[6];
+   struct virtchnl2_queue_reg_chunk chunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_non_flex_queue_reg_chunks);
+
+/* structure to specify single chunk of interrupt vector */
+/* 'vchunks' is fixed size (not flexible) and will be deprecated at some point */
+struct virtchnl2_non_flex_vector_chunks {
+   __le16 num_vchunks;
+   u8 reserved[14];
+   struct virtchnl2_vector_chunk vchunks[1];
+};
+
+VIRTCHNL2_CHECK_STRUCT_LEN(48, virtchnl2_non_flex_vector_chunks);
+
+/* VIRTCHNL2_OP_NON_FLEX_CREATE_ADI
  * PF sends this message to CP to create ADI by filling in required
- * fields of virtchnl2_create_adi structure.
- * CP responds with the updated virtchnl2_create_adi structure containing the
- * necessary fields followed by chunks which in turn will have an array of
+ * fields of virtchnl2_non_flex_create_adi structure.
+ * CP responds with the updated virtchnl2_non_flex_create_adi structure containing
+ * the necessary fields followed by chunks which in turn will have an array of
  * num_chunks entries of virtchnl2_queue_chunk structures.
  */
-struct virtchnl2_create_adi {
+struct virtchnl2_non_flex_create_adi {
/* PF sends PASID to CP */
__le32 pasid;
/*
@@ -1076,29 +1098,31 @@ struct virtchnl2_create_adi {
__le16 mbx_id;
/* PF sends mailbox vector id to CP */
__le16 mbx_vec_id;
+   /* PF populates this ADI index */
+   __le16 adi_index;
/* CP populates ADI id */
__le16 adi_id;
u8 reserved[64];
-  

[PATCH v5 07/11] common/idpf/base: add union for SW cookie fields

2023-09-19 Thread Simei Su
Instead of using something like a byte offset, we can add a union to the
struct to enable direct addressing.
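
Roughly, the change enables the pattern below (simplified stand-in types,
not the driver's actual struct layout): instead of computing byte offsets
into the raw context bytes, callers name the cookie fields directly.

#include <stdint.h>

#define INDIRECT_CTX_SIZE 20	/* assumed size for the sketch */

struct ctlq_msg_ctx {
	union {
		uint8_t context[INDIRECT_CTX_SIZE];
		struct {
			uint32_t rsvd;
			uint16_t data;
			uint16_t flags;
		} sw_cookie;
	} ctx;
};

void set_cookie(struct ctlq_msg_ctx *msg, uint16_t data, uint16_t flags)
{
	/* Before, something like memcpy(&msg->ctx.context[4], &data, 2)
	 * would have been needed; the union makes the fields addressable.
	 */
	msg->ctx.sw_cookie.data = data;
	msg->ctx.sw_cookie.flags = flags;
}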

Signed-off-by: Alan Brady 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 .mailmap | 1 +
 drivers/common/idpf/base/idpf_controlq_api.h | 5 +
 2 files changed, 6 insertions(+)

diff --git a/.mailmap b/.mailmap
index 23aed53102..2fcadb4e4c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1645,3 +1645,4 @@ Josh Hay 
 Madhu Chittim 
 Shailendra Bhatnagar 
 Julianx Grajkowski 
+Alan Brady 
diff --git a/drivers/common/idpf/base/idpf_controlq_api.h 
b/drivers/common/idpf/base/idpf_controlq_api.h
index 3780304256..f4e7b53ac9 100644
--- a/drivers/common/idpf/base/idpf_controlq_api.h
+++ b/drivers/common/idpf/base/idpf_controlq_api.h
@@ -77,6 +77,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+   struct {
+   u32 rsvd;
+   u16 data;
+   u16 flags;
+   } sw_cookie;
} ctx;
 };
 
-- 
2.25.1



[PATCH v5 08/11] common/idpf/base: refine code and alignments

2023-09-19 Thread Simei Su
a) Refine double pointer with a local pointer.
b) Refine return type for function instead of only returning success.
c) Remove unnecessary check and comments.
d) Use tab spaces and new lines wherever necessary.
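
Item (a) amounts to the pattern sketched below (illustrative code only, not
the driver's): build the object through a local pointer and publish it via
the output parameter once, instead of dereferencing the double pointer on
every access.

#include <errno.h>
#include <stdlib.h>

struct ctlq_info {
	int q_id;
	int ring_size;
};

int ctlq_add(int q_id, int ring_size, struct ctlq_info **cq_out)
{
	struct ctlq_info *cq = calloc(1, sizeof(*cq));

	if (!cq)
		return -ENOMEM;

	cq->q_id = q_id;	/* work on the local pointer ... */
	cq->ring_size = ring_size;

	*cq_out = cq;		/* ... and publish it once at the end */
	return 0;
}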

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_common.c|  4 +-
 drivers/common/idpf/base/idpf_controlq.c  | 64 +--
 drivers/common/idpf/base/idpf_controlq_api.h  | 12 +---
 .../common/idpf/base/idpf_controlq_setup.c|  5 +-
 drivers/common/idpf/base/idpf_lan_pf_regs.h   |  7 +-
 drivers/common/idpf/base/idpf_lan_txrx.h  | 46 ++---
 drivers/common/idpf/base/idpf_lan_vf_regs.h   | 25 +---
 drivers/common/idpf/base/idpf_prototype.h |  2 +-
 8 files changed, 80 insertions(+), 85 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_common.c 
b/drivers/common/idpf/base/idpf_common.c
index 9610916aa9..7181a7f14c 100644
--- a/drivers/common/idpf/base/idpf_common.c
+++ b/drivers/common/idpf/base/idpf_common.c
@@ -262,12 +262,12 @@ int idpf_clean_arq_element(struct idpf_hw *hw,
  *  idpf_deinit_hw - shutdown routine
  *  @hw: pointer to the hardware structure
  */
-int idpf_deinit_hw(struct idpf_hw *hw)
+void idpf_deinit_hw(struct idpf_hw *hw)
 {
hw->asq = NULL;
hw->arq = NULL;
 
-   return idpf_ctlq_deinit(hw);
+   idpf_ctlq_deinit(hw);
 }
 
 /**
diff --git a/drivers/common/idpf/base/idpf_controlq.c 
b/drivers/common/idpf/base/idpf_controlq.c
index 6815153e1d..a82ca628de 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -9,11 +9,10 @@
  * @cq: pointer to the specific control queue
  * @q_create_info: structs containing info for each queue to be initialized
  */
-static void
-idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
-struct idpf_ctlq_create_info *q_create_info)
+static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+struct idpf_ctlq_create_info *q_create_info)
 {
-   /* set head and tail registers in our local struct */
+   /* set control queue registers in our local struct */
cq->reg.head = q_create_info->reg.head;
cq->reg.tail = q_create_info->reg.tail;
cq->reg.len = q_create_info->reg.len;
@@ -75,7 +74,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
desc->flags =
CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
desc->opcode = 0;
-   desc->datalen = (__le16)CPU_TO_LE16(bi->size);
+   desc->datalen = CPU_TO_LE16(bi->size);
desc->ret_val = 0;
desc->cookie_high = 0;
desc->cookie_low = 0;
@@ -137,6 +136,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
  struct idpf_ctlq_create_info *qinfo,
  struct idpf_ctlq_info **cq_out)
 {
+   struct idpf_ctlq_info *cq;
bool is_rxq = false;
int status = 0;
 
@@ -145,26 +145,26 @@ int idpf_ctlq_add(struct idpf_hw *hw,
qinfo->buf_size > IDPF_CTLQ_MAX_BUF_LEN)
return -EINVAL;
 
-   *cq_out = (struct idpf_ctlq_info *)
-   idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
-   if (!(*cq_out))
+   cq = (struct idpf_ctlq_info *)
+idpf_calloc(hw, 1, sizeof(struct idpf_ctlq_info));
+   if (!cq)
return -ENOMEM;
 
-   (*cq_out)->cq_type = qinfo->type;
-   (*cq_out)->q_id = qinfo->id;
-   (*cq_out)->buf_size = qinfo->buf_size;
-   (*cq_out)->ring_size = qinfo->len;
+   cq->cq_type = qinfo->type;
+   cq->q_id = qinfo->id;
+   cq->buf_size = qinfo->buf_size;
+   cq->ring_size = qinfo->len;
 
-   (*cq_out)->next_to_use = 0;
-   (*cq_out)->next_to_clean = 0;
-   (*cq_out)->next_to_post = (*cq_out)->ring_size - 1;
+   cq->next_to_use = 0;
+   cq->next_to_clean = 0;
+   cq->next_to_post = cq->ring_size - 1;
 
switch (qinfo->type) {
case IDPF_CTLQ_TYPE_MAILBOX_RX:
is_rxq = true;
/* fallthrough */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
-   status = idpf_ctlq_alloc_ring_res(hw, *cq_out);
+   status = idpf_ctlq_alloc_ring_res(hw, cq);
break;
default:
status = -EINVAL;
@@ -175,33 +175,35 @@ int idpf_ctlq_add(struct idpf_hw *hw,
goto init_free_q;
 
if (is_rxq) {
-   idpf_ctlq_init_rxq_bufs(*cq_out);
+   idpf_ctlq_init_rxq_bufs(cq);
} else {
/* Allocate the array of msg pointers for TX queues */
-   (*cq_out)->bi.tx_msg = (struct idpf_ctlq_msg **)
+   cq->bi.tx_msg = (struct idpf_ctlq_msg **)

[PATCH v5 09/11] common/idpf/base: use GENMASK macro

2023-09-19 Thread Simei Su
Instead of using a custom defined macro for generating a mask,
use the standard GENMASK macro.
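
For reference, GENMASK(h, l) produces a mask with bits h through l set, so
the conversions in this patch are value-preserving, e.g. GENMASK(12, 0)
equals IDPF_M(0x1FFF, 0). A self-contained sketch of the equivalence (the
actual definitions in idpf_osdep.h may differ in detail):

#include <assert.h>
#include <stdint.h>

/* Typical GENMASK definition for 32-bit masks (assumed) */
#define GENMASK(h, l) \
	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Old style (assumed definition): shift a literal mask into place */
#define IDPF_M(m, s)	((m) << (s))

int main(void)
{
	assert(GENMASK(12, 0) == IDPF_M(0x1FFFu, 0));	/* PF_FW_ARQLEN_ARQLEN_M */
	assert(GENMASK(4, 3)  == IDPF_M(0x3u, 3));	/* PF_GLINT_DYN_CTL_ITR_INDX_M */
	assert(GENMASK(11, 0) == IDPF_M(0xFFFu, 0));	/* PF_GLINT_ITR_INTERVAL_M */
	return 0;
}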

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_lan_pf_regs.h |  26 ++---
 drivers/common/idpf/base/idpf_lan_txrx.h| 116 +---
 drivers/common/idpf/base/idpf_lan_vf_regs.h |  16 +--
 drivers/common/idpf/base/idpf_osdep.h   |   7 ++
 4 files changed, 80 insertions(+), 85 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_pf_regs.h 
b/drivers/common/idpf/base/idpf_lan_pf_regs.h
index eab23f279a..a51e39a502 100644
--- a/drivers/common/idpf/base/idpf_lan_pf_regs.h
+++ b/drivers/common/idpf/base/idpf_lan_pf_regs.h
@@ -24,7 +24,7 @@
 #define PF_FW_ARQBAH   (PF_FW_BASE + 0x4)
 #define PF_FW_ARQLEN   (PF_FW_BASE + 0x8)
 #define PF_FW_ARQLEN_ARQLEN_S  0
-#define PF_FW_ARQLEN_ARQLEN_M  IDPF_M(0x1FFF, PF_FW_ARQLEN_ARQLEN_S)
+#define PF_FW_ARQLEN_ARQLEN_M  GENMASK(12, 0)
 #define PF_FW_ARQLEN_ARQVFE_S  28
 #define PF_FW_ARQLEN_ARQVFE_M  BIT(PF_FW_ARQLEN_ARQVFE_S)
 #define PF_FW_ARQLEN_ARQOVFL_S 29
@@ -35,14 +35,14 @@
 #define PF_FW_ARQLEN_ARQENABLE_M   BIT(PF_FW_ARQLEN_ARQENABLE_S)
 #define PF_FW_ARQH (PF_FW_BASE + 0xC)
 #define PF_FW_ARQH_ARQH_S  0
-#define PF_FW_ARQH_ARQH_M  IDPF_M(0x1FFF, PF_FW_ARQH_ARQH_S)
+#define PF_FW_ARQH_ARQH_M  GENMASK(12, 0)
 #define PF_FW_ARQT (PF_FW_BASE + 0x10)
 
 #define PF_FW_ATQBAL   (PF_FW_BASE + 0x14)
 #define PF_FW_ATQBAH   (PF_FW_BASE + 0x18)
 #define PF_FW_ATQLEN   (PF_FW_BASE + 0x1C)
 #define PF_FW_ATQLEN_ATQLEN_S  0
-#define PF_FW_ATQLEN_ATQLEN_M  IDPF_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S)
+#define PF_FW_ATQLEN_ATQLEN_M  GENMASK(9, 0)
 #define PF_FW_ATQLEN_ATQVFE_S  28
 #define PF_FW_ATQLEN_ATQVFE_M  BIT(PF_FW_ATQLEN_ATQVFE_S)
 #define PF_FW_ATQLEN_ATQOVFL_S 29
@@ -53,7 +53,7 @@
 #define PF_FW_ATQLEN_ATQENABLE_M   BIT(PF_FW_ATQLEN_ATQENABLE_S)
 #define PF_FW_ATQH (PF_FW_BASE + 0x20)
 #define PF_FW_ATQH_ATQH_S  0
-#define PF_FW_ATQH_ATQH_M  IDPF_M(0x3FF, PF_FW_ATQH_ATQH_S)
+#define PF_FW_ATQH_ATQH_M  GENMASK(9, 0)
 #define PF_FW_ATQT (PF_FW_BASE + 0x24)
 
 /* Interrupts */
@@ -66,7 +66,7 @@
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_S  2
 #define PF_GLINT_DYN_CTL_SWINT_TRIG_M  BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
 #define PF_GLINT_DYN_CTL_ITR_INDX_S3
-#define PF_GLINT_DYN_CTL_ITR_INDX_MIDPF_M(0x3, PF_GLINT_DYN_CTL_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_MGENMASK(4, 3)
 #define PF_GLINT_DYN_CTL_INTERVAL_S5
 #define PF_GLINT_DYN_CTL_INTERVAL_MBIT(PF_GLINT_DYN_CTL_INTERVAL_S)
 #define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
@@ -87,13 +87,13 @@
(PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
 #define PF_GLINT_ITR_MAX_INDEX 2
 #define PF_GLINT_ITR_INTERVAL_S0
-#define PF_GLINT_ITR_INTERVAL_M	IDPF_M(0xFFF, PF_GLINT_ITR_INTERVAL_S)
+#define PF_GLINT_ITR_INTERVAL_MGENMASK(11, 0)
 
 /* Timesync registers */
 #define PF_TIMESYNC_BASE   0x08404000
 #define PF_GLTSYN_CMD_SYNC (PF_TIMESYNC_BASE)
 #define PF_GLTSYN_CMD_SYNC_EXEC_CMD_S  0
-#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  IDPF_M(0x3, PF_GLTSYN_CMD_SYNC_EXEC_CMD_S)
+#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M  GENMASK(1, 0)
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_S 2
 #define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(PF_GLTSYN_CMD_SYNC_SHTIME_EN_S)
 #define PF_GLTSYN_SHTIME_0 (PF_TIMESYNC_BASE + 0x4)
@@ -105,23 +105,23 @@
 /* Generic registers */
 #define PF_INT_DIR_OICR_ENA0x08406000
 #define PF_INT_DIR_OICR_ENA_S  0
-#define PF_INT_DIR_OICR_ENA_M  IDPF_M(0x, PF_INT_DIR_OICR_ENA_S)
+#define PF_INT_DIR_OICR_ENA_M  GENMASK(31, 0)
 #define PF_INT_DIR_OICR0x08406004
 #define PF_INT_DIR_OICR_TSYN_EVNT  0
 #define PF_INT_DIR_OICR_PHY_TS_0   BIT(1)
 #define PF_INT_DIR_OICR_PHY_TS_1   BIT(2)
 #define PF_INT_DIR_OICR_CAUSE  0x08406008
 #define PF_INT_DIR_OICR_CAUSE_CAUSE_S  0
-#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  IDPF_M(0x, PF_INT_DIR_OICR_CAUSE_CAUSE_S)
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M  GENMASK(31, 0)
 #define PF_INT_PBA_CLEAR   0x0840600C
 
 #define PF_FUNC_RID0x08406010
 #define PF_FUNC_RID_FUNCTION_NUMBER_S  0
-#define PF_FUNC_RID_FUNCTION_NUMBER_M  IDPF_M(0x7, PF_FUNC_RID_FUNCTION_NUMBER_S)
+#define PF_FUNC_RID_FUNCTION_NUMBER_M  GENMASK(2, 0)
 #define PF_FUNC_RID_DEVICE_NUMBER_S3
-#define PF_FUNC_RID_DEVICE_NUMBER_M	IDPF_M(0x1F, PF_FUNC_RID_DEVICE_NUMBER_S)
+#define PF_FUNC_RID_DEVICE_NUMBER_MGENMASK(7, 3)
 #define PF_FUNC_RID_BUS_NUMBER_S   8
-#define

[PATCH v5 11/11] common/idpf/base: update version

2023-09-19 Thread Simei Su
Update the base code release version recorded in the README.

Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/common/idpf/base/README b/drivers/common/idpf/base/README
index 693049c057..ff26f736ec 100644
--- a/drivers/common/idpf/base/README
+++ b/drivers/common/idpf/base/README
@@ -6,7 +6,7 @@ Intel® IDPF driver
 ==
 
 This directory contains source code of BSD-3-Clause idpf driver of version
-2023.02.23 released by the team which develops basic drivers for Intel IPU.
+2023.07.25 released by the team which develops basic drivers for Intel IPU.
 The directory of base/ contains the original source package.
 This driver is valid for the product(s) listed below
 
-- 
2.25.1



[PATCH v5 10/11] common/idpf/base: remove unused Tx descriptor types

2023-09-19 Thread Simei Su
Remove the unused TX descriptor types and mark them as reserved.
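
For context (simplified sketch, not from the patch), the remaining DTYPE
values are consumed through the QW1 cmd_dtype field, which packs the
descriptor type into bits 4:0 and the command bits into 15:5; byte-order
conversion to little endian is omitted here.

#include <assert.h>
#include <stdint.h>

#define IDPF_FLEX_TXD_QW1_DTYPE_S	0
#define IDPF_FLEX_TXD_QW1_DTYPE_M	0x001F	/* GENMASK(4, 0) */
#define IDPF_FLEX_TXD_QW1_CMD_S		5
#define IDPF_FLEX_TXD_QW1_CMD_M		0xFFE0	/* GENMASK(15, 5) */

#define IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE	12

static uint16_t flex_txd_qw1(uint16_t dtype, uint16_t cmd)
{
	return ((dtype << IDPF_FLEX_TXD_QW1_DTYPE_S) & IDPF_FLEX_TXD_QW1_DTYPE_M) |
	       ((cmd << IDPF_FLEX_TXD_QW1_CMD_S) & IDPF_FLEX_TXD_QW1_CMD_M);
}

int main(void)
{
	/* Flow-scheduling data descriptor (DTYPE 12), no command bits set */
	assert(flex_txd_qw1(IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE, 0) == 12);
	return 0;
}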

Signed-off-by: Pavan Kumar Linga 
Signed-off-by: Simei Su 
Acked-by: Beilei Xing 
---
 drivers/common/idpf/base/idpf_lan_txrx.h | 136 ++-
 1 file changed, 12 insertions(+), 124 deletions(-)

diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h 
b/drivers/common/idpf/base/idpf_lan_txrx.h
index c39930654a..c9eaeb5d3f 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -119,19 +119,19 @@ enum idpf_rss_hash {
 enum idpf_tx_desc_dtype_value {
IDPF_TX_DESC_DTYPE_DATA = 0,
IDPF_TX_DESC_DTYPE_CTX  = 1,
-   IDPF_TX_DESC_DTYPE_REINJECT_CTX = 2,
-   IDPF_TX_DESC_DTYPE_FLEX_DATA= 3,
-   IDPF_TX_DESC_DTYPE_FLEX_CTX = 4,
+   /* DTYPE 2 is reserved
+* DTYPE 3 is free for future use
+* DTYPE 4 is reserved
+*/
IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
-   IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 = 6,
+   /* DTYPE 6 is reserved */
IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2   = 7,
-   IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX  = 8,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_TSO_CTX= 9,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_SA_CTX= 10,
-   IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX  = 11,
+   /* DTYPE 8, 9 are free for future use
+* DTYPE 10 is reserved
+* DTYPE 11 is free for future use
+*/
IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE   = 12,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_TSO_CTX   = 13,
-   IDPF_TX_DESC_DTYPE_FLEX_HOSTSPLIT_CTX   = 14,
+   /* DTYPE 13, 14 are free for future use */
/* DESC_DONE - HW has completed write-back of descriptor */
IDPF_TX_DESC_DTYPE_DESC_DONE= 15,
 };
@@ -225,28 +225,18 @@ enum idpf_tx_flex_desc_cmd_bits {
 struct idpf_flex_tx_desc {
__le64 buf_addr;/* Packet buffer address */
struct {
-   __le16 cmd_dtype;
 #define IDPF_FLEX_TXD_QW1_DTYPE_S  0
 #define IDPF_FLEX_TXD_QW1_DTYPE_M  GENMASK(4, 0)
 #define IDPF_FLEX_TXD_QW1_CMD_S5
 #define IDPF_FLEX_TXD_QW1_CMD_MGENMASK(15, 5)
+   __le16 cmd_dtype;
union {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_DATA_(0x03) */
-   u8 raw[4];
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSYN_L2TAG1 (0x06) */
-   struct {
-   __le16 l2tag1;
-   u8 flex;
-   u8 tsync;
-   } tsync;
-
/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
struct {
__le16 l2tag1;
__le16 l2tag2;
} l2tags;
-   } flex;
+   };
__le16 buf_size;
} qw1;
 };
@@ -296,16 +286,6 @@ struct idpf_flex_tx_tso_ctx_qw {
 };
 
 union idpf_flex_tx_ctx_desc {
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_CTX (0x04) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag1;
-   u8 qw1_flex[4];
-   } qw1;
-   } gen;
-
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
@@ -314,98 +294,6 @@ union idpf_flex_tx_ctx_desc {
u8 flex[6];
} qw1;
} tso;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_L2TAG2_PARSTAG_CTX (0x08) */
-   struct {
-   struct idpf_flex_tx_tso_ctx_qw qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex0;
-   u8 ptag;
-   u8 flex1[2];
-   } qw1;
-   } tso_l2tag2_ptag;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_L2TAG2_CTX (0x0B) */
-   struct {
-   u8 qw0_flex[8];
-   struct {
-   __le16 cmd_dtype;
-   __le16 l2tag2;
-   u8 flex[4];
-   } qw1;
-   } l2tag2;
-
-   /* DTYPE = IDPF_TX_DESC_DTYPE_REINJECT_CTX (0x02) */
-   struct {
-   struct {
-   __le32 sa_domain;
-#define IDPF_TXD_FLEX_CTX_SA_DOM_M 0x
-#define IDPF_TXD_FLEX_CTX_SA_DOM_VAL   0x1
-   __le32 sa_idx;
-#define IDPF_TXD_FLEX_CTX_SAIDX_M  0x1F
-   } qw0;
-   struct {
-   __le16 cmd_dtype;
-   __le16 txr2comp;
-#define IDPF_TXD_FLEX_CTX_TXR2COMP
