Re: [PATCH] net/nfp: fix MTU configuration order
Hi Stephen,

Thanks for your feedback.

On 2023-03-07 18:44:09 -0800, Stephen Hemminger wrote:
> On Wed, 8 Mar 2023 10:33:18 +0800
> Chaoyong He wrote:
>
> > diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
> > index 5922bfea8e..5d92b476e2 100644
> > --- a/drivers/net/nfp/nfp_common.c
> > +++ b/drivers/net/nfp/nfp_common.c
> > @@ -1126,9 +1126,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
> >  		return -EBUSY;
> >  	}
> >
> > -	/* MTU larger then current mbufsize not supported */
> > +	/* MTU larger than current mbufsize not supported */
> >  	if (mtu > hw->flbufsz) {
> > -		PMD_DRV_LOG(ERR, "MTU (%u) larger then current mbufsize (%u) not supported",
> > +		PMD_DRV_LOG(ERR, "MTU (%u) larger than current mbufsize (%u) not supported",
> >  			mtu, hw->flbufsz);
> >  		return -ERANGE;
> >  	}
>
> Patch looks good but this looks like unrelated whitespace change.

It's a tad unrelated, but not a whitespace change. It fixes a spelling
mistake related to setting the MTU, s/then/than/g

--
Kind Regards,
Niklas Söderlund
[PATCH v4] app/testpmd: fix secondary process not forwarding
Under multi-process scenario, the secondary process gets queue state
from the wrong location (the global variable 'ports'). Therefore, the
secondary process cannot forward since "stream_init" is not called.

This commit fixes the issue by calling 'rte_eth_rx/tx_queue_info_get'
to get queue state from shared memory.

Fixes: 3c4426db54fc ("app/testpmd: do not poll stopped queues")
Cc: sta...@dpdk.org

Signed-off-by: Shiyang He

v2: Add function return value processing
v3: Add return value description
v4: Update queue state in 'start_port()'
---
 app/test-pmd/testpmd.c | 72 +-
 1 file changed, 71 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 0c14325b8d..aa2a7b68ca 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2379,6 +2379,70 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
 	}
 }

+static void
+update_rx_queue_state(uint16_t port_id, uint16_t queue_id)
+{
+	struct rte_eth_rxq_info rx_qinfo;
+	int32_t rc;
+
+	rc = rte_eth_rx_queue_info_get(port_id,
+			queue_id, &rx_qinfo);
+	if (rc == 0) {
+		ports[port_id].rxq[queue_id].state =
+			rx_qinfo.queue_state;
+	} else if (rc == -ENOTSUP) {
+		/*
+		 * Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
+		 * to ensure that the PMDs do not implement
+		 * rte_eth_rx_queue_info_get can forward.
+		 */
+		ports[port_id].rxq[queue_id].state =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	} else {
+		TESTPMD_LOG(WARNING,
+			"Failed to get rx queue info\n");
+	}
+}
+
+static void
+update_tx_queue_state(uint16_t port_id, uint16_t queue_id)
+{
+	struct rte_eth_txq_info tx_qinfo;
+	int32_t rc;
+
+	rc = rte_eth_tx_queue_info_get(port_id,
+			queue_id, &tx_qinfo);
+	if (rc == 0) {
+		ports[port_id].txq[queue_id].state =
+			tx_qinfo.queue_state;
+	} else if (rc == -ENOTSUP) {
+		/*
+		 * Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
+		 * to ensure that the PMDs do not implement
+		 * rte_eth_tx_queue_info_get can forward.
+		 */
+		ports[port_id].txq[queue_id].state =
+			RTE_ETH_QUEUE_STATE_STARTED;
+	} else {
+		TESTPMD_LOG(WARNING,
+			"Failed to get tx queue info\n");
+	}
+}
+
+static void
+update_queue_state(void)
+{
+	portid_t pi;
+	queueid_t qi;
+
+	RTE_ETH_FOREACH_DEV(pi) {
+		for (qi = 0; qi < nb_rxq; qi++)
+			update_rx_queue_state(pi, qi);
+		for (qi = 0; qi < nb_txq; qi++)
+			update_tx_queue_state(pi, qi);
+	}
+}
+
 /*
  * Launch packet forwarding configuration.
  */
@@ -2418,9 +2482,12 @@ start_packet_forwarding(int with_tx_first)
 	if (!pkt_fwd_shared_rxq_check())
 		return;

-	if (stream_init != NULL)
+	if (stream_init != NULL) {
+		if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+			update_queue_state();
 		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
 			stream_init(fwd_streams[i]);
+	}

 	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
 	if (port_fwd_begin != NULL) {
@@ -3180,6 +3247,9 @@ start_port(portid_t pid)
 		pl[cfg_pi++] = pi;
 	}

+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		update_queue_state();
+
 	if (at_least_one_port_successfully_started && !no_link_check)
 		check_all_ports_link_status(RTE_PORT_ALL);
 	else if (at_least_one_port_exist & all_ports_already_started)
--
2.37.2
Re: [PATCH v2 1/1] drivers: remove implementation of Rx metadata negotiation
On Tue, Mar 7, 2023 at 4:16 PM Hanumanth Pothula wrote:
>
> Presently, Rx metadata is sent to PMD by default, leading
> to a performance drop as processing for the same in Rx path
> takes extra cycles.
>
> Hence, removing driver implementation of Rx metadata negotiation
> and falling back to old implementation where mark actions are
> tracked as part of the flow rule.
>
> Signed-off-by: Hanumanth Pothula

Updated the git commit as follows and applied to
dpdk-next-net-mrvl/for-next-net. Thanks

    net/cnxk: remove Rx metadata negotiation

> ---
> v2: Remove explicit initializations.
> ---
>  drivers/common/cnxk/roc_npc.c      | 19 +++
>  drivers/common/cnxk/roc_npc.h      |  3 +++
>  drivers/common/cnxk/roc_npc_priv.h |  1 +
>  drivers/common/cnxk/version.map    |  2 ++
>  drivers/net/cnxk/cn10k_ethdev.c    | 26 --
>  drivers/net/cnxk/cn10k_flow.c      | 19 +++
>  drivers/net/cnxk/cn9k_ethdev.c     | 25 -
>  drivers/net/cnxk/cn9k_flow.c       | 20 
>  drivers/net/cnxk/cnxk_ethdev.h     |  1 -
>  9 files changed, 64 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
> index a795114326..47536c8ce8 100644
> --- a/drivers/common/cnxk/roc_npc.c
> +++ b/drivers/common/cnxk/roc_npc.c
> @@ -5,6 +5,23 @@
>  #include "roc_api.h"
>  #include "roc_priv.h"
>
> +int
> +roc_npc_mark_actions_get(struct roc_npc *roc_npc)
> +{
> +	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
> +
> +	return npc->mark_actions;
> +}
> +
> +int
> +roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc, uint32_t count)
> +{
> +	struct npc *npc = roc_npc_to_npc_priv(roc_npc);
> +
> +	npc->mark_actions -= count;
> +	return npc->mark_actions;
> +}
> +
>  int
>  roc_npc_vtag_actions_get(struct roc_npc *roc_npc)
>  {
> @@ -488,12 +505,14 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
>  			}
>  			mark = act_mark->id + 1;
>  			req_act |= ROC_NPC_ACTION_TYPE_MARK;
> +			npc->mark_actions += 1;
>  			flow->match_id = mark;
>  			break;
>
>  		case ROC_NPC_ACTION_TYPE_FLAG:
>  			mark = NPC_FLOW_FLAG_VAL;
>  			req_act |= ROC_NPC_ACTION_TYPE_FLAG;
> +			npc->mark_actions += 1;
>  			break;
>
>  		case ROC_NPC_ACTION_TYPE_COUNT:
> diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
> index 5e07e26a91..61d0628f5f 100644
> --- a/drivers/common/cnxk/roc_npc.h
> +++ b/drivers/common/cnxk/roc_npc.h
> @@ -397,6 +397,9 @@ int __roc_api roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc);
>  void __roc_api roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc);
>  void __roc_api roc_npc_flow_mcam_dump(FILE *file, struct roc_npc *roc_npc,
>  				      struct roc_npc_flow *mcam);
> +int __roc_api roc_npc_mark_actions_get(struct roc_npc *roc_npc);
> +int __roc_api roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc,
> +					      uint32_t count);
>  int __roc_api roc_npc_vtag_actions_get(struct roc_npc *roc_npc);
>  int __roc_api roc_npc_vtag_actions_sub_return(struct roc_npc *roc_npc,
>  					      uint32_t count);
> diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h
> index 08d763eeb4..714dcb09c9 100644
> --- a/drivers/common/cnxk/roc_npc_priv.h
> +++ b/drivers/common/cnxk/roc_npc_priv.h
> @@ -393,6 +393,7 @@ struct npc {
>  	uint16_t flow_prealloc_size;    /* Pre allocated mcam size */
>  	uint16_t flow_max_priority;     /* Max priority for flow */
>  	uint16_t switch_header_type;    /* Supported switch header type */
> +	uint32_t mark_actions;
>  	uint32_t vtag_strip_actions;    /* vtag insert/strip actions */
>  	uint16_t pf_func;               /* pf_func of device */
>  	npc_dxcfg_t prx_dxcfg;          /* intf, lid, lt, extract */
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 5d2b75fb5a..3eff3870d1 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -344,6 +344,8 @@ INTERNAL {
>  	roc_npc_flow_parse;
>  	roc_npc_get_low_priority_mcam;
>  	roc_npc_init;
> +	roc_npc_mark_actions_get;
> +	roc_npc_mark_actions_sub_return;
>  	roc_npc_vtag_actions_get;
>  	roc_npc_vtag_actions_sub_return;
>  	roc_npc_mcam_alloc_entries;
> diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
> index b84fed6d90..512b9c2597 100644
> --- a/drivers/net/cnxk/cn10k_ethdev.c
> +++ b/drivers/net/cnxk/cn10k_ethdev.c
> @@ -39,9 +39,6 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_de
Re: [PATCH v3] app/testpmd: fix secondary process not forwarding
On 3/8/2023 2:54 AM, lihuisong (C) wrote:
>
> 在 2023/3/8 10:05, He, ShiyangX 写道:
>>
>>> -----Original Message-----
>>> From: Ferruh Yigit
>>> Sent: Tuesday, March 7, 2023 7:41 PM
>>> To: He, ShiyangX ; dev@dpdk.org
>>> Cc: Zhou, YidingX ; sta...@dpdk.org; Zhang, Yuying
>>> ; Singh, Aman Deep ; Burakov, Anatoly
>>> ; Matan Azrad ; Dmitry Kozlyuk
>>> Subject: Re: [PATCH v3] app/testpmd: fix secondary process not
>>> forwarding
>>>
>>> On 3/7/2023 3:25 AM, He, ShiyangX wrote:
> -----Original Message-----
> From: Ferruh Yigit
> Sent: Monday, March 6, 2023 11:06 PM
> To: He, ShiyangX ; dev@dpdk.org
> Cc: Zhou, YidingX ; sta...@dpdk.org; Zhang, Yuying
> ; Singh, Aman Deep ; Burakov, Anatoly
> ; Matan Azrad ; Dmitry Kozlyuk
> Subject: Re: [PATCH v3] app/testpmd: fix secondary process not
> forwarding
>
> On 2/23/2023 2:41 PM, Shiyang He wrote:
>> Under multi-process scenario, the secondary process gets queue state
>> from the wrong location (the global variable 'ports'). Therefore,
>> the secondary process cannot forward since "stream_init" is not
>> called.
>>
>> This commit fixes the issue by calling 'rte_eth_rx/tx_queue_info_get'
>> to get queue state from shared memory.
>>
>> Fixes: 3c4426db54fc ("app/testpmd: do not poll stopped queues")
>> Cc: sta...@dpdk.org
>>
>> Signed-off-by: Shiyang He
>> Acked-by: Yuying Zhang
>>
>> v3: Add return value description
>> ---
>>  app/test-pmd/testpmd.c | 45 --
>>  1 file changed, 43 insertions(+), 2 deletions(-)
>>
>> diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
>> index 0c14325b8d..a050472aea 100644
>> --- a/app/test-pmd/testpmd.c
>> +++ b/app/test-pmd/testpmd.c
>> @@ -2418,9 +2418,50 @@ start_packet_forwarding(int with_tx_first)
>>  	if (!pkt_fwd_shared_rxq_check())
>>  		return;
>>
>> -	if (stream_init != NULL)
>> -		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++)
>> +	if (stream_init != NULL) {
>> +		for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
>> +			if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
>> +				struct fwd_stream *fs = fwd_streams[i];
>> +				struct rte_eth_rxq_info rx_qinfo;
>> +				struct rte_eth_txq_info tx_qinfo;
>> +				int32_t rc;
>> +				rc = rte_eth_rx_queue_info_get(fs->rx_port,
>> +						fs->rx_queue, &rx_qinfo);
>> +				if (rc == 0) {
>> +					ports[fs->rx_port].rxq[fs->rx_queue].state =
>> +						rx_qinfo.queue_state;
>> +				} else if (rc == -ENOTSUP) {
>> +					/* Set the rxq state to RTE_ETH_QUEUE_STATE_STARTED
>> +					 * to ensure that the PMDs do not implement
>> +					 * rte_eth_rx_queue_info_get can forward.
>> +					 */
>> +					ports[fs->rx_port].rxq[fs->rx_queue].state =
>> +						RTE_ETH_QUEUE_STATE_STARTED;
>> +				} else {
>> +					TESTPMD_LOG(WARNING,
>> +						"Failed to get rx queue info\n");
>> +				}
>> +
>> +				rc = rte_eth_tx_queue_info_get(fs->tx_port,
>> +						fs->tx_queue, &tx_qinfo);
>> +				if (rc == 0) {
>> +					ports[fs->tx_port].txq[fs->tx_queue].state =
>> +						tx_qinfo.queue_state;
>> +				} else if (rc == -ENOTSUP) {
>> +					/* Set the txq state to RTE_ETH_QUEUE_STATE_STARTED
>> +					 * to ensure that the PMDs do not implement
>> +					 * rte_eth_tx_queue_info_get can forward.
>> +					 */
>> +					ports[fs->tx_port].txq[fs->tx_queue].state =
>> +						RTE_ETH_QUEUE_STATE_STARTED;
>> +				} else {
>> +					TESTPMD_LOG(WARNING,
>> +						"Failed to get tx queue info\n");
>> +				}
>> +			}
>>  			stream_init(fwd_streams[i]);
>> +		}
>> +	}
>>
>
> Testpmd duplicates some dpdk/ethdev state/config in application
> level, and this can bite in multiple cases, as it is happening here.
>
> I am not sure if this was a design decision, but I think instead of
> testpmd storing ethdev related state/config in application level, it
> should store only application level state/config, and when ethdev
> related state/config is required app should get it directly from
> ethdev.
>
> It may be too late
Re: [PATCH v4] app/testpmd: fix secondary process not forwarding
On 3/8/2023 4:19 PM, Shiyang He wrote:
> Under multi-process scenario, the secondary process gets queue state
> from the wrong location (the global variable 'ports'). Therefore, the
> secondary process cannot forward since "stream_init" is not called.
>
> This commit fixes the issue by calling 'rte_eth_rx/tx_queue_info_get'
> to get queue state from shared memory.
>
> Fixes: 3c4426db54fc ("app/testpmd: do not poll stopped queues")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Shiyang He

Reviewed-by: Ferruh Yigit

Applied to dpdk-next-net/main, thanks.
RE: [PATCH v3 1/3] common/mlx5: get Windows dependency from standard variables
> Subject: [PATCH v3 1/3] common/mlx5: get Windows dependency from
> standard variables
>
> External email: Use caution opening links or attachments
>
>
> The DevX library path had to be provided through the variables
> DEVX_INC_PATH and DEVX_LIB_PATH.
> It was non-standard and triggers some issues with recent Meson.
>
> Using CFLAGS/LDFLAGS is standard and simpler.
> It is also possible to use the Meson options -Dc_args and -Dc_link_args.
> There are 2 options to provide:
>     -I
>     -L
>
> Signed-off-by: Thomas Monjalon
> ---
>  doc/guides/platform/mlx5.rst            | 11 +-
>  drivers/common/mlx5/windows/meson.build | 28 -
>  2 files changed, 20 insertions(+), 19 deletions(-)
>
> diff --git a/doc/guides/platform/mlx5.rst b/doc/guides/platform/mlx5.rst
> index 2d6fbe7e44..5fc5d0cb8c 100644
> --- a/doc/guides/platform/mlx5.rst
> +++ b/doc/guides/platform/mlx5.rst
> @@ -259,13 +259,14 @@ configured by the ``ibverbs_link`` build option:
>  Compilation on Windows
>  ~~~~~~~~~~~~~~~~~~~~~~
>
> -The DevX SDK location must be set through two environment variables:
> +The DevX SDK location must be set through CFLAGS/LDFLAGS,
> +either::
>
> -``DEVX_LIB_PATH``
> -   path to the DevX lib file.
> +   meson.exe setup "-Dc_args=-I\"%DEVX_INC_PATH%\"" "-Dc_link_args=-L\"%DEVX_LIB_PATH%\"" ...
>
> -``DEVX_INC_PATH``
> -   path to the DevX header files.
> +or::
> +
> +   set CFLAGS=-I"%DEVX_INC_PATH%" && set LDFLAGS=-L"%DEVX_LIB_PATH%" && meson.exe setup ...
>
>
>  .. _mlx5_common_env:
> diff --git a/drivers/common/mlx5/windows/meson.build b/drivers/common/mlx5/windows/meson.build
> index cc486014a8..f60daed840 100644
> --- a/drivers/common/mlx5/windows/meson.build
> +++ b/drivers/common/mlx5/windows/meson.build
> @@ -1,6 +1,20 @@
>  # SPDX-License-Identifier: BSD-3-Clause
>  # Copyright 2019 Mellanox Technologies, Ltd
>
> +if not cc.has_header('mlx5devx.h')
> +    build = false
> +    reason = 'missing dependency, "mlx5devx.h"'
> +    subdir_done()
> +endif
> +
> +devxlib = cc.find_library('mlx5devx', required: true)
> +if not devxlib.found()
> +    build = false
> +    reason = 'missing dependency, "mlx5devx"'
> +    subdir_done()
> +endif
> +ext_deps += devxlib
> +
>  includes += include_directories('.')
>
>  sources += files(
> @@ -8,20 +22,6 @@ sources += files(
>          'mlx5_common_os.c',
>  )
>
> -res_lib = run_command(python3, '-c', 'import os; print(os.environ["DEVX_LIB_PATH"])', check: false)
> -res_inc = run_command(python3, '-c', 'import os; print(os.environ["DEVX_INC_PATH"])', check: false)
> -
> -if (res_lib.returncode() != 0 or res_inc.returncode() != 0)
> -    build = false
> -    reason = 'DevX environment variables are not set, DEVX_LIB_PATH and DEVX_INC_PATH vars must be exported'
> -    subdir_done()
> -endif
> -
> -devx_lib_dir = res_lib.stdout().strip()
> -devx_inc_dir = res_inc.stdout().strip()
> -
> -ext_deps += cc.find_library('mlx5devx', dirs: devx_lib_dir, required: true)
> -includes += include_directories(devx_inc_dir)
>
>  cflags_options = [
>      '-std=c11',
>      '-Wno-strict-prototypes',
> --
> 2.39.1

Acked-by: Tal Shnaiderman
RE: [PATCH v3 2/3] net/mlx5: remove weak stub functions
> Subject: Re: [PATCH v3 2/3] net/mlx5: remove weak stub functions
>
> External email: Use caution opening links or attachments
>
>
> On Thu, Mar 02, 2023 at 02:21:49PM +0100, Thomas Monjalon wrote:
> > The vector Rx functions are conditionally compiled.
> > Some stub functions were also always compiled with weak attribute.
> > If there is no vector support, the weak functions were linked.
> >
> > These weak functions are moved in a specific file which is compiled
> > only if there is no vector support.
> > This way it is simpler to understand,
> > and the weak attributes can be removed.
> >
> > This change helps to compile with MinGW GCC which has no support for
> > weak functions.
> >
> > Signed-off-by: Thomas Monjalon
> > ---
>
> Acked-by: Tyler Retzlaff

Acked-by: Tal Shnaiderman
RE: [PATCH v3 3/3] net/mlx5: fix Windows build with MinGW GCC 12
> Subject: Re: [PATCH v3 3/3] net/mlx5: fix Windows build with MinGW GCC 12
>
> External email: Use caution opening links or attachments
>
>
> On Thu, Mar 02, 2023 at 02:21:50PM +0100, Thomas Monjalon wrote:
> > With recent changes in Meson and MinGW toolchain, the driver mlx5 was
> > not able to compile on Linux for Windows.
> >
> > There were errors due to system detection, non-typed constants,
> > constant going over int range forbidden in pedantic mode, and
> > minimum-comparison of different types.
> >
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Thomas Monjalon
> > ---
>
> lgtm
>
> Acked-by: Tyler Retzlaff

Acked-by: Tal Shnaiderman
Re: [PATCH 2/2] net/mana: enable driver by default
20/01/2023 03:19, lon...@linuxonhyperv.com:
> From: Long Li
>
> The dependencies of mana have been released in rdma-core v44 and Linux
> kernel 6.2.
>
> Signed-off-by: Long Li

I'm squashing this patch with the previous one, including the backport
request.
I consider it is allowed to enable a driver in backports.
Stable maintainers will decide.
[PATCH 0/2] crypto/qat: added cipher-crc offload feature
This patchset adds support to the QAT PMD for combined cipher-crc
processing on the QAT device.

The current QAT PMD implementation of cipher-crc calculates CRC in
software and uses QAT for encryption/decryption offload.

Note: The code-path is still retained for QAT versions without support
for combined Cipher-CRC offload.

- Support has been added to DPDK QAT PMD to enable the use of the
  cipher-crc offload feature on gen2/gen3/gen4 QAT devices.
- A cipher-crc offload capability check has been added to the queue
  pair setup function to determine if the feature is supported on the
  QAT device.

Kevin O'Sullivan (2):
  crypto/qat: added cipher-crc offload support
  crypto/qat: added cipher-crc cap check

 drivers/common/qat/qat_adf/icp_qat_fw.h      |   1 -
 drivers/common/qat/qat_adf/icp_qat_fw_la.h   |   3 +-
 drivers/common/qat/qat_adf/icp_qat_hw.h      | 133 +
 drivers/common/qat/qat_device.c              |  12 +-
 drivers/common/qat/qat_device.h              |   3 +-
 drivers/common/qat/qat_qp.c                  | 157 +++
 drivers/common/qat/qat_qp.h                  |   5 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  24 ++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   4 +
 drivers/crypto/qat/qat_crypto.c              |  22 ++-
 drivers/crypto/qat/qat_crypto.h              |   1 +
 drivers/crypto/qat/qat_sym.c                 |   4 +
 drivers/crypto/qat/qat_sym.h                 |   7 +-
 drivers/crypto/qat/qat_sym_session.c         | 194 +++
 drivers/crypto/qat/qat_sym_session.h         |  21 +-
 16 files changed, 576 insertions(+), 17 deletions(-)

--
2.34.1
[PATCH 1/2] crypto/qat: added cipher-crc offload support
Functionality has been added to the QAT PMD to use the combined
cipher-crc offload feature on the gen1/gen2/gen3 QAT devices by setting
the CRC content descriptor accordingly.

Signed-off-by: Kevin O'Sullivan
Signed-off-by: David Coyle
---
 drivers/common/qat/qat_adf/icp_qat_fw.h    |   1 -
 drivers/common/qat/qat_adf/icp_qat_fw_la.h |   3 +-
 drivers/common/qat/qat_adf/icp_qat_hw.h    | 133 +
 3 files changed, 135 insertions(+), 2 deletions(-)

diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index be10fc9bde..3aa17ae041 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -4,7 +4,6 @@
 #ifndef _ICP_QAT_FW_H_
 #define _ICP_QAT_FW_H_
 #include 
-#include "icp_qat_hw.h"

 #define QAT_FIELD_SET(flags, val, bitpos, mask) \
 { (flags) = (((flags) & (~((mask) << (bitpos)))) | \
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index c4901eb869..227a6cebc8 100644
--- a/drivers/common/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -18,7 +18,8 @@ enum icp_qat_fw_la_cmd_id {
 	ICP_QAT_FW_LA_CMD_MGF1 = 9,
 	ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
 	ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
-	ICP_QAT_FW_LA_CMD_DELIMITER = 12
+	ICP_QAT_FW_LA_CMD_CIPHER_CRC = 17,
+	ICP_QAT_FW_LA_CMD_DELIMITER = 18
 };

 #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
index 866147cd77..8b864e1630 100644
--- a/drivers/common/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -4,6 +4,8 @@
 #ifndef _ICP_QAT_HW_H_
 #define _ICP_QAT_HW_H_

+#include "icp_qat_fw.h"
+
 #define ADF_C4XXXIOV_VFLEGFUSES_OFFSET 0x4C
 #define ADF1_C4XXXIOV_VFLEGFUSES_LEN 4

@@ -260,14 +262,19 @@ enum icp_qat_hw_cipher_convert {
 };

 #define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_LE_BITPOS 28
 #define QAT_CIPHER_MODE_MASK 0xF
 #define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_LE_BITPOS 24
 #define QAT_CIPHER_ALGO_MASK 0xF
 #define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_LE_BITPOS 17
 #define QAT_CIPHER_CONVERT_MASK 0x1
 #define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_LE_BITPOS 16
 #define QAT_CIPHER_DIR_MASK 0x1
 #define QAT_CIPHER_AEAD_HASH_CMP_LEN_BITPOS 10
+#define QAT_CIPHER_AEAD_HASH_CMP_LEN_LE_BITPOS 18
 #define QAT_CIPHER_AEAD_HASH_CMP_LEN_MASK 0x1F
 #define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
 #define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
@@ -281,7 +288,9 @@ enum icp_qat_hw_cipher_convert {
 #define QAT_CIPHER_AEAD_AAD_UPPER_SHIFT 8
 #define QAT_CIPHER_AEAD_AAD_SIZE_LOWER_MASK 0xFF
 #define QAT_CIPHER_AEAD_AAD_SIZE_UPPER_MASK 0x3F
+#define QAT_CIPHER_AEAD_AAD_SIZE_MASK 0x3FFF
 #define QAT_CIPHER_AEAD_AAD_SIZE_BITPOS 16
+#define QAT_CIPHER_AEAD_AAD_SIZE_LE_BITPOS 0
 #define ICP_QAT_HW_CIPHER_CONFIG_BUILD_UPPER(aad_size) \
 ({ \
 	typeof(aad_size) aad_size1 = aad_size; \
@@ -362,6 +371,28 @@ struct icp_qat_hw_cipher_algo_blk {
 	uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
 } __rte_cache_aligned;

+struct icp_qat_hw_gen2_crc_cd {
+	uint32_t flags;
+	uint32_t reserved1[5];
+	uint32_t initial_crc;
+	uint32_t reserved2[3];
+};
+
+#define QAT_GEN3_COMP_REFLECT_IN_BITPOS 17
+#define QAT_GEN3_COMP_REFLECT_IN_MASK 0x1
+#define QAT_GEN3_COMP_REFLECT_OUT_BITPOS 18
+#define QAT_GEN3_COMP_REFLECT_OUT_MASK 0x1
+
+struct icp_qat_hw_gen3_crc_cd {
+	uint32_t flags;
+	uint32_t reserved1[3];
+	uint32_t polynomial;
+	uint32_t xor_val;
+	uint32_t reserved2[2];
+	uint32_t initial_crc;
+	uint32_t reserved3;
+};
+
 struct icp_qat_hw_ucs_cipher_config {
 	uint32_t val;
 	uint32_t reserved[3];
@@ -372,6 +403,108 @@ struct icp_qat_hw_cipher_algo_blk20 {
 	uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
 } __rte_cache_aligned;

+enum icp_qat_hw_ucs_cipher_reflect_out {
+	ICP_QAT_HW_CIPHER_UCS_REFLECT_OUT_DISABLED = 0,
+	ICP_QAT_HW_CIPHER_UCS_REFLECT_OUT_ENABLED = 1,
+};
+
+enum icp_qat_hw_ucs_cipher_reflect_in {
+	ICP_QAT_HW_CIPHER_UCS_REFLECT_IN_DISABLED = 0,
+	ICP_QAT_HW_CIPHER_UCS_REFLECT_IN_ENABLED = 1,
+};
+
+enum icp_qat_hw_ucs_cipher_crc_encoding {
+	ICP_QAT_HW_CIPHER_UCS_CRC_NOT_REQUIRED = 0,
+	ICP_QAT_HW_CIPHER_UCS_CRC32 = 1,
+	ICP_QAT_HW_CIPHER_UCS_CRC64 = 2,
+};
+
+#define QAT_CIPHER_UCS_REFLECT_OUT_LE_BITPOS 17
+#define QAT_CIPHER_UCS_REFLECT_OUT_MASK 0x1
+#define QAT_CIPHER_UCS_REFLECT_IN_LE_BITPOS 16
+#define QAT_CIPHER_UCS_REFLECT_IN_MASK 0x1
+#define QAT_CIPHER_UCS_CRC_ENCODING_LE_BITPOS 14
+#define QAT_CIPHER_UCS_CRC_ENCODING_MASK 0x3
+
+struct icp_qat_fw_ucs_slice_cipher_config {
+	enum icp_qat_hw_cipher_mode mode;
+	enum icp_qat_hw_cipher_algo algo;
+	uint16_t hash_cmp_val;
+	enum icp_qat_hw
[PATCH 2/2] crypto/qat: added cipher-crc cap check
A configuration item called qat_sym_cipher_crc_enable has been added.
When set, an LA bulk req message with combined cipher-crc will be sent
on startup to the QAT device. The response is checked to see if the
data returned matches the cipher text. If a match is determined the
cipher-crc capability bit is set to indicate support.

If cipher-crc offload is supported, the LA Bulk request will be
formatted correctly before being enqueued to the device.

Signed-off-by: Kevin O'Sullivan
Signed-off-by: David Coyle
---
 drivers/common/qat/qat_device.c              |  12 +-
 drivers/common/qat/qat_device.h              |   3 +-
 drivers/common/qat/qat_qp.c                  | 157 +++
 drivers/common/qat/qat_qp.h                  |   5 +
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |   2 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |  24 ++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    |   4 +
 drivers/crypto/qat/qat_crypto.c              |  22 ++-
 drivers/crypto/qat/qat_crypto.h              |   1 +
 drivers/crypto/qat/qat_sym.c                 |   4 +
 drivers/crypto/qat/qat_sym.h                 |   7 +-
 drivers/crypto/qat/qat_sym_session.c         | 194 +++
 drivers/crypto/qat/qat_sym_session.h         |  21 +-
 13 files changed, 441 insertions(+), 15 deletions(-)

diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 8bce2ac073..308c59c39f 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -149,7 +149,16 @@ qat_dev_parse_cmd(const char *str, struct qat_dev_cmd_param
 		} else {
 			memcpy(value_str, arg2, iter);
 			value = strtol(value_str, NULL, 10);
-			if (value > MAX_QP_THRESHOLD_SIZE) {
+			if (strcmp(param,
+				   SYM_CIPHER_CRC_ENABLE_NAME) == 0) {
+				if (value < 0 || value > 1) {
+					QAT_LOG(DEBUG, "The value for"
+						" qat_sym_cipher_crc_enable"
+						" should be set to 0 or 1,"
+						" setting to 0");
+					value = 0;
+				}
+			} else if (value > MAX_QP_THRESHOLD_SIZE) {
 				QAT_LOG(DEBUG, "Exceeded max size of"
 					" threshold, setting to %d",
 					MAX_QP_THRESHOLD_SIZE);
@@ -369,6 +378,7 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		{ SYM_ENQ_THRESHOLD_NAME, 0 },
 		{ ASYM_ENQ_THRESHOLD_NAME, 0 },
 		{ COMP_ENQ_THRESHOLD_NAME, 0 },
+		{ SYM_CIPHER_CRC_ENABLE_NAME, 0 },
 		[QAT_CMD_SLICE_MAP_POS] = { QAT_CMD_SLICE_MAP, 0},
 		{ NULL, 0 },
 	};
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index bc3da04238..4188474dde 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -21,8 +21,9 @@
 #define SYM_ENQ_THRESHOLD_NAME "qat_sym_enq_threshold"
 #define ASYM_ENQ_THRESHOLD_NAME "qat_asym_enq_threshold"
 #define COMP_ENQ_THRESHOLD_NAME "qat_comp_enq_threshold"
+#define SYM_CIPHER_CRC_ENABLE_NAME "qat_sym_cipher_crc_enable"
 #define QAT_CMD_SLICE_MAP "qat_cmd_slice_disable"
-#define QAT_CMD_SLICE_MAP_POS 4
+#define QAT_CMD_SLICE_MAP_POS 5
 #define MAX_QP_THRESHOLD_SIZE 32

 /**
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 9cbd19a481..441dbe9846 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -11,6 +11,9 @@
 #include 
 #include 
 #include 
+#ifdef RTE_LIB_SECURITY
+#include 
+#endif

 #include "qat_logs.h"
 #include "qat_device.h"
@@ -957,6 +960,160 @@ qat_cq_get_fw_version(struct qat_qp *qp)
 	return -EINVAL;
 }

+#ifdef BUILD_QAT_SYM
+/* Sends an LA bulk req message to determine if a QAT device supports Cipher-CRC
+ * offload. This assumes that there are no inflight messages, i.e. assumes
+ * there's space on the qp, one message is sent and only one response
+ * collected. The status bit of the response and returned data are checked.
+ * Returns:
+ *   1 if status bit indicates success and returned data matches expected
+ *     data (i.e. Cipher-CRC supported)
+ *   0 if status bit indicates error or returned data does not match expected
+ *     data (i.e. Cipher-CRC not supported)
+ *   Negative error code in case of error
+ */
+int
+qat_cq_get_fw_cipher_crc_cap(struct qat_qp *qp)
+{
+	struct qat_queue *queue = &(qp->tx_q);
+	uint8_t *base_addr = (uint8_t *)queue->base_addr;
+	struct icp_q
RE: [PATCH v3 0/2] net/mlx5: support MPLSoUDP for HWS
Hi,

> -----Original Message-----
> From: Michael Baum
> Sent: Thursday, February 23, 2023 9:48 AM
> To: dev@dpdk.org
> Cc: Matan Azrad ; Raslan Darawsheh ; Slava Ovsiienko
> Subject: [PATCH v3 0/2] net/mlx5: support MPLSoUDP for HWS
>
> Add support for matching/encap/decap MPLSoUDP including multiple MPLS
> headers.
>
> v2:
>  - Rebase.
>  - Fix typo in comment.
>
> v3:
>  - Update to capital letters at the beginning of sentences in comments.
>
> Erez Shitrit (1):
>   net/mlx5/hws: support matching on MPLSoUDP
>
> Michael Baum (1):
>   net/mlx5: add MPLS tunnel support for HWS
>
>  doc/guides/nics/mlx5.rst               |   4 +
>  doc/guides/rel_notes/release_23_03.rst |   1 +
>  drivers/net/mlx5/hws/mlx5dr_definer.c  | 183 -
>  drivers/net/mlx5/hws/mlx5dr_definer.h  |  32 -
>  drivers/net/mlx5/mlx5_flow_hw.c        |   1 +
>  5 files changed, 218 insertions(+), 3 deletions(-)
>
> --
> 2.25.1

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
[PATCH] examples/qos_sched: fix buffer overflow on mbuf free
When running the qos_sched app with separated worker and Tx threads, the
app would seg-fault after a short time of handling packets. The root
cause of this turns out to be an incorrect array index when freeing
unsent packets post-Tx. Rather than freeing packets using the "nb_tx"
value i.e. where transmission failed, the function was freeing packets
using the "nb_pkts" value, i.e. going beyond the number of packets
previously received into the buffer.

Fixes: 39b25117c40b ("examples/qos_sched: remove Tx buffering")

Reported-by: Megha Ajmera
Signed-off-by: Bruce Richardson
---
 examples/qos_sched/app_thread.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
index 1ea732aa91..059c470afb 100644
--- a/examples/qos_sched/app_thread.c
+++ b/examples/qos_sched/app_thread.c
@@ -118,7 +118,7 @@ app_tx_thread(struct thread_conf **confs)
 		if (likely(nb_pkts != 0)) {
 			uint16_t nb_tx = rte_eth_tx_burst(conf->tx_port, 0, mbufs, nb_pkts);
 			if (nb_pkts != nb_tx)
-				rte_pktmbuf_free_bulk(&mbufs[nb_pkts], nb_pkts - nb_tx);
+				rte_pktmbuf_free_bulk(&mbufs[nb_tx], nb_pkts - nb_tx);
 		}

 		conf_idx++;
--
2.37.2
RE: [PATCH] examples/qos_sched: fix buffer overflow on mbuf free
> -----Original Message-----
> From: Richardson, Bruce
> Sent: Wednesday, March 8, 2023 2:09 PM
> To: dev@dpdk.org
> Cc: Richardson, Bruce ; Ajmera, Megha ; Dumitrescu, Cristian
> Subject: [PATCH] examples/qos_sched: fix buffer overflow on mbuf free
>
> When running the qos_sched app with separated worker and Tx threads, the
> app would seg-fault after a short time of handling packets. The root
> cause of this turns out to be an incorrect array index when freeing
> unsent packets post-Tx. Rather than freeing packets using the "nb_tx"
> value i.e. where transmission failed, the function was freeing packets
> using the "nb_pkts" value, i.e. going beyond the number of packets
> previously received into the buffer.
>
> Fixes: 39b25117c40b ("examples/qos_sched: remove Tx buffering")
>
> Reported-by: Megha Ajmera
> Signed-off-by: Bruce Richardson
> ---
>  examples/qos_sched/app_thread.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/examples/qos_sched/app_thread.c b/examples/qos_sched/app_thread.c
> index 1ea732aa91..059c470afb 100644
> --- a/examples/qos_sched/app_thread.c
> +++ b/examples/qos_sched/app_thread.c

Acked-by: Cristian Dumitrescu
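The one-line change above restores the standard partial-send cleanup
idiom: rte_eth_tx_burst() takes ownership of the first nb_tx mbufs, so
only the unsent tail starting at index nb_tx may be freed. A minimal
hedged sketch of that idiom, with the surrounding thread loop omitted
and send_and_reclaim() an illustrative name:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Transmit a burst and reclaim only the unsent tail. */
static void
send_and_reclaim(uint16_t port, struct rte_mbuf **mbufs, uint16_t nb_pkts)
{
	uint16_t nb_tx = rte_eth_tx_burst(port, 0, mbufs, nb_pkts);

	/* mbufs[0..nb_tx-1] now belong to the driver; freeing from
	 * &mbufs[nb_pkts] instead would read past the received burst. */
	if (nb_tx < nb_pkts)
		rte_pktmbuf_free_bulk(&mbufs[nb_tx], nb_pkts - nb_tx);
}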
[dpdk-dev v1] crypto/openssl: fix of ASAN heap-use-after-free
Fix ASAN report of heap-use-after-free error on tmp buffer.

Fixes: d7bd42f6db19 ("crypto/openssl: update RSA routine with 3.0 EVP API")
Cc: kai...@intel.com

Signed-off-by: Kai Ji
---
 drivers/crypto/openssl/rte_openssl_pmd.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index abcb641a44..384d262621 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -2633,7 +2633,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop,
 		if (EVP_PKEY_verify_recover(rsa_ctx, tmp, &outlen,
 				op->rsa.sign.data,
 				op->rsa.sign.length) <= 0) {
-			rte_free(tmp);
+			OPENSSL_free(tmp);
 			goto err_rsa;
 		}

@@ -2645,7 +2645,7 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop,
 				op->rsa.message.length)) {
 			OPENSSL_LOG(ERR, "RSA sign Verification failed");
 		}
-		rte_free(tmp);
+		OPENSSL_free(tmp);
 		break;

 	default:
--
2.17.1
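The rule the fix applies: a buffer obtained from OpenSSL's allocator
must be released with OPENSSL_free(), not rte_free(), since the two
heaps are managed independently. A minimal sketch under the assumption
that tmp is allocated with OPENSSL_malloc() earlier in the function, as
in the surrounding EVP code; recover_buf_example() and recover_len are
illustrative names only:

#include <stddef.h>
#include <stdint.h>
#include <openssl/crypto.h>

static int
recover_buf_example(size_t recover_len)
{
	uint8_t *tmp = OPENSSL_malloc(recover_len);	/* OpenSSL heap */

	if (tmp == NULL)
		return -1;
	/* ... pass tmp to EVP_PKEY_verify_recover() ... */
	OPENSSL_free(tmp);	/* allocator and deallocator must match */
	return 0;
}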
Re: [PATCH] net/nfp: fix MTU configuration order
On 3/8/2023 2:33 AM, Chaoyong He wrote:
> From: Peng Zhang
>
> If rte_eth_dev_set_mtu() is called before rte_eth_rx_queue_setup() the
> NFP driver setup fails. This is because the default values evaluated
> when setting the MTU are initialized in the rte_eth_rx_queue_setup()
> code path. Fix this by instead initializing the MTU default values in
> the device initialization, in nfp_net_init() and the check also is
> conducted in nfp_net_start(), so it doesn't influence the result.
>
> This was found by using DPDK with OVS.
>
> Fixes: dbad6f64f921 ("net/nfp: fix internal buffer size and MTU check")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Peng Zhang
> Reviewed-by: Chaoyong He
> Reviewed-by: Niklas Söderlund

Applied to dpdk-next-net/main, thanks.
[PATCH v2 0/5] net/mlx5: add indirect QUOTA create/query/modify
Add indirect quota flow action.
Add match on quota flow item.

v2: rebase to the latest main branch.

Gregory Etelson (5):
  net/mlx5: update query fields in async job structure
  net/mlx5: remove code duplication
  common/mlx5: update MTR ASO definitions
  net/mlx5: add indirect QUOTA create/query/modify
  mlx5dr: Definer, translate RTE quota item

 drivers/common/mlx5/mlx5_prm.h        |   4 +
 drivers/net/mlx5/hws/mlx5dr_definer.c |  61 +++
 drivers/net/mlx5/meson.build          |   1 +
 drivers/net/mlx5/mlx5.h               |  88 -
 drivers/net/mlx5/mlx5_flow.c          |  62 +++
 drivers/net/mlx5/mlx5_flow.h          |  20 +-
 drivers/net/mlx5/mlx5_flow_aso.c      |  10 +-
 drivers/net/mlx5/mlx5_flow_hw.c       | 527 ++
 8 files changed, 592 insertions(+), 181 deletions(-)

--
2.34.1
[PATCH v2 3/5] common/mlx5: update MTR ASO definitions
Update MTR ASO definitions for QUOTA flow action.
Quota flow action requires WQE READ capability and access to token
fields.

Signed-off-by: Gregory Etelson
---
 drivers/common/mlx5/mlx5_prm.h | 4
 1 file changed, 4 insertions(+)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 75af636f59..525364d5e4 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -3917,6 +3917,8 @@ enum mlx5_aso_op {
 	ASO_OPER_LOGICAL_OR = 0x1,
 };

+#define MLX5_ASO_CSEG_READ_ENABLE 1
+
 /* ASO WQE CTRL segment. */
 struct mlx5_aso_cseg {
 	uint32_t va_h;
@@ -3931,6 +3933,8 @@ struct mlx5_aso_cseg {
 	uint64_t data_mask;
 } __rte_packed;

+#define MLX5_MTR_MAX_TOKEN_VALUE INT32_MAX
+
 /* A meter data segment - 2 per ASO WQE. */
 struct mlx5_aso_mtr_dseg {
 	uint32_t v_bo_sc_bbog_mm;
--
2.34.1
[PATCH v2 1/5] net/mlx5: update query fields in async job structure
Query fields defined in `mlx5_hw_q_job` target CT type only.
The patch updates `mlx5_hw_q_job` for other query types as well.

Signed-off-by: Gregory Etelson
---
 drivers/net/mlx5/mlx5.h          | 10 +-
 drivers/net/mlx5/mlx5_flow_aso.c |  2 +-
 drivers/net/mlx5/mlx5_flow_hw.c  |  6 +++---
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a766fb408e..aa956ec1b7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -366,11 +366,11 @@ struct mlx5_hw_q_job {
 	struct rte_flow_item *items;
 	union {
 		struct {
-			/* Pointer to ct query user memory. */
-			struct rte_flow_action_conntrack *profile;
-			/* Pointer to ct ASO query out memory. */
-			void *out_data;
-		} __rte_packed;
+			/* User memory for query output */
+			void *user;
+			/* Data extracted from hardware */
+			void *hw;
+		} __rte_packed query;
 		struct rte_flow_item_ethdev port_spec;
 		struct rte_flow_item_tag tag_spec;
 	} __rte_packed;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index 29bd7ce9e8..0eb91c570f 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -1389,7 +1389,7 @@ mlx5_aso_ct_sq_query_single(struct mlx5_dev_ctx_shared *sh,
 		struct mlx5_hw_q_job *job = (struct mlx5_hw_q_job *)user_data;

 		sq->elts[wqe_idx].ct = user_data;
-		job->out_data = (char *)((uintptr_t)sq->mr.addr + wqe_idx * 64);
+		job->query.hw = (char *)((uintptr_t)sq->mr.addr + wqe_idx * 64);
 	} else {
 		sq->elts[wqe_idx].query_data = data;
 		sq->elts[wqe_idx].ct = ct;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index a9c7045a3e..cd951019de 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -2738,8 +2738,8 @@ __flow_hw_pull_indir_action_comp(struct rte_eth_dev *dev,
 			idx = MLX5_ACTION_CTX_CT_GET_IDX
 				((uint32_t)(uintptr_t)job->action);
 			aso_ct = mlx5_ipool_get(priv->hws_ctpool->cts, idx);
-			mlx5_aso_ct_obj_analyze(job->profile,
-						job->out_data);
+			mlx5_aso_ct_obj_analyze(job->query.user,
+						job->query.hw);
 			aso_ct->state = ASO_CONNTRACK_READY;
 		}
 	}
@@ -8275,7 +8275,7 @@ flow_hw_action_handle_query(struct rte_eth_dev *dev, uint32_t queue,
 	case MLX5_INDIRECT_ACTION_TYPE_CT:
 		aso = true;
 		if (job)
-			job->profile = (struct rte_flow_action_conntrack *)data;
+			job->query.user = data;
 		ret = flow_hw_conntrack_query(dev, queue, act_idx, data,
 					      job, push, error);
 		break;
--
2.34.1
[PATCH v2 2/5] net/mlx5: remove code duplication
Replace duplicated code with dedicated functions.

Signed-off-by: Gregory Etelson
---
 drivers/net/mlx5/mlx5.h         |   6 +-
 drivers/net/mlx5/mlx5_flow_hw.c | 182
 2 files changed, 95 insertions(+), 93 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index aa956ec1b7..a4ed61e257 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -344,11 +344,11 @@ struct mlx5_lb_ctx {
 };

 /* HW steering queue job descriptor type. */
-enum {
+enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */
 	MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
-	MLX5_HW_Q_JOB_TYPE_UPDATE,
-	MLX5_HW_Q_JOB_TYPE_QUERY,
+	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
+	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
 };

 #define MLX5_HW_MAX_ITEMS (16)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index cd951019de..8a5e8941fd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -7626,6 +7626,67 @@ flow_hw_action_handle_validate(struct rte_eth_dev *dev, uint32_t queue,
 	return 0;
 }

+static __rte_always_inline bool
+flow_hw_action_push(const struct rte_flow_op_attr *attr)
+{
+	return attr ? !attr->postpone : true;
+}
+
+static __rte_always_inline struct mlx5_hw_q_job *
+flow_hw_job_get(struct mlx5_priv *priv, uint32_t queue)
+{
+	return priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
+}
+
+static __rte_always_inline void
+flow_hw_job_put(struct mlx5_priv *priv, uint32_t queue)
+{
+	priv->hw_q[queue].job_idx++;
+}
+
+static __rte_always_inline struct mlx5_hw_q_job *
+flow_hw_action_job_init(struct mlx5_priv *priv, uint32_t queue,
+			const struct rte_flow_action_handle *handle,
+			void *user_data, void *query_data,
+			enum mlx5_hw_job_type type,
+			struct rte_flow_error *error)
+{
+	struct mlx5_hw_q_job *job;
+
+	MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
+	if (unlikely(!priv->hw_q[queue].job_idx)) {
+		rte_flow_error_set(error, ENOMEM,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+				   "Action destroy failed due to queue full.");
+		return NULL;
+	}
+	job = flow_hw_job_get(priv, queue);
+	job->type = type;
+	job->action = handle;
+	job->user_data = user_data;
+	job->query.user = query_data;
+	return job;
+}
+
+static __rte_always_inline void
+flow_hw_action_finalize(struct rte_eth_dev *dev, uint32_t queue,
+			struct mlx5_hw_q_job *job,
+			bool push, bool aso, bool status)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	if (likely(status)) {
+		if (push)
+			__flow_hw_push_action(dev, queue);
+		if (!aso)
+			rte_ring_enqueue(push ?
+					 priv->hw_q[queue].indir_cq :
+					 priv->hw_q[queue].indir_iq,
+					 job);
+	} else {
+		flow_hw_job_put(priv, queue);
+	}
+}
+
 /**
  * Create shared action.
  *
@@ -7663,21 +7724,15 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 	cnt_id_t cnt_id;
 	uint32_t mtr_id;
 	uint32_t age_idx;
-	bool push = true;
+	bool push = flow_hw_action_push(attr);
 	bool aso = false;

 	if (attr) {
-		MLX5_ASSERT(queue != MLX5_HW_INV_QUEUE);
-		if (unlikely(!priv->hw_q[queue].job_idx)) {
-			rte_flow_error_set(error, ENOMEM,
-					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					   "Flow queue full.");
+		job = flow_hw_action_job_init(priv, queue, NULL, user_data,
+					      NULL, MLX5_HW_Q_JOB_TYPE_CREATE,
+					      error);
+		if (!job)
 			return NULL;
-		}
-		job = priv->hw_q[queue].job[--priv->hw_q[queue].job_idx];
-		job->type = MLX5_HW_Q_JOB_TYPE_CREATE;
-		job->user_data = user_data;
-		push = !attr->postpone;
 	}
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_AGE:
@@ -7740,17 +7795,9 @@ flow_hw_action_handle_create(struct rte_eth_dev *dev, uint32_t queue,
 		break;
 	}
 	if (job) {
-		if (!handle) {
-			priv->hw_q[queue].job_idx++;
-			return NULL;
-		}
 		job->action = handle;
-		if (push)
-			__flow_hw_push_action(dev, queue);
-		if (aso)
-			return
[PATCH v2 4/5] net/mlx5: add indirect QUOTA create/query/modify
Implement HWS functions for indirect QUOTA creation, modification and
query.

Signed-off-by: Gregory Etelson
---
 drivers/net/mlx5/meson.build     |   1 +
 drivers/net/mlx5/mlx5.h          |  72 +++
 drivers/net/mlx5/mlx5_flow.c     |  62 ++
 drivers/net/mlx5/mlx5_flow.h     |  20 +-
 drivers/net/mlx5/mlx5_flow_aso.c |   8 +-
 drivers/net/mlx5/mlx5_flow_hw.c  | 343 ---
 6 files changed, 425 insertions(+), 81 deletions(-)

diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index abd507bd88..323c381d2b 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
         'mlx5_flow_dv.c',
         'mlx5_flow_aso.c',
         'mlx5_flow_flex.c',
+        'mlx5_flow_quota.c',
         'mlx5_mac.c',
         'mlx5_rss.c',
         'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a4ed61e257..6e6f2f53eb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -46,6 +46,14 @@

 #define MLX5_HW_INV_QUEUE UINT32_MAX

+/*
+ * The default ipool threshold value indicates which per_core_cache
+ * value to set.
+ */
+#define MLX5_HW_IPOOL_SIZE_THRESHOLD (1 << 19)
+/* The default min local cache size. */
+#define MLX5_HW_IPOOL_CACHE_MIN (1 << 9)
+
 /*
  * Number of modification commands.
  * The maximal actions amount in FW is some constant, and it is 16 in the
@@ -349,6 +357,7 @@ enum mlx5_hw_job_type {
 	MLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */
 	MLX5_HW_Q_JOB_TYPE_UPDATE, /* Flow update job type. */
 	MLX5_HW_Q_JOB_TYPE_QUERY, /* Flow query job type. */
+	MLX5_HW_Q_JOB_TYPE_UPDATE_QUERY, /* Flow update and query job type. */
 };

 #define MLX5_HW_MAX_ITEMS (16)
@@ -601,6 +610,7 @@ struct mlx5_aso_sq_elem {
 			char *query_data;
 		};
 		void *user_data;
+		struct mlx5_quota *quota_obj;
 	};
 };

@@ -1658,6 +1668,33 @@ struct mlx5_hw_ctrl_flow {

 struct mlx5_flow_hw_ctrl_rx;

+enum mlx5_quota_state {
+	MLX5_QUOTA_STATE_FREE, /* quota not in use */
+	MLX5_QUOTA_STATE_READY, /* quota is ready */
+	MLX5_QUOTA_STATE_WAIT /* quota waits WR completion */
+};
+
+struct mlx5_quota {
+	uint8_t state; /* object state */
+	uint8_t mode;  /* metering mode */
+	/**
+	 * Keep track of application update types.
+	 * PMD does not allow 2 consecutive ADD updates.
+	 */
+	enum rte_flow_update_quota_op last_update;
+};
+
+/* Bulk management structure for flow quota. */
+struct mlx5_quota_ctx {
+	uint32_t nb_quotas; /* Total number of quota objects */
+	struct mlx5dr_action *dr_action; /* HWS action */
+	struct mlx5_devx_obj *devx_obj; /* DEVX ranged object. */
+	struct mlx5_pmd_mr mr; /* MR for READ from MTR ASO */
+	struct mlx5_aso_mtr_dseg **read_buf; /* Buffers for READ */
+	struct mlx5_aso_sq *sq; /* SQs for sync/async ACCESS_ASO WRs */
+	struct mlx5_indexed_pool *quota_ipool; /* Manage quota objects */
+};
+
 struct mlx5_priv {
 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
@@ -1747,6 +1784,7 @@ struct mlx5_priv {
 	struct mlx5_flow_meter_policy *mtr_policy_arr; /* Policy array. */
 	struct mlx5_l3t_tbl *mtr_idx_tbl; /* Meter index lookup table. */
 	struct mlx5_mtr_bulk mtr_bulk; /* Meter index mapping for HWS */
+	struct mlx5_quota_ctx quota_ctx; /* Quota index mapping for HWS */
 	uint8_t skip_default_rss_reta; /* Skip configuration of default reta. */
 	uint8_t fdb_def_rule; /* Whether fdb jump to table 1 is configured. */
 	struct mlx5_mp_id mp_id; /* ID of a multi-process process */
@@ -2242,6 +2280,15 @@ int mlx5_aso_ct_queue_init(struct mlx5_dev_ctx_shared *sh,
 			   uint32_t nb_queues);
 int mlx5_aso_ct_queue_uninit(struct mlx5_dev_ctx_shared *sh,
 			     struct mlx5_aso_ct_pools_mng *ct_mng);
+int
+mlx5_aso_sq_create(struct mlx5_common_device *cdev, struct mlx5_aso_sq *sq,
+		   void *uar, uint16_t log_desc_n);
+void
+mlx5_aso_destroy_sq(struct mlx5_aso_sq *sq);
+void
+mlx5_aso_mtr_init_sq(struct mlx5_aso_sq *sq);
+void
+mlx5_aso_cqe_err_handle(struct mlx5_aso_sq *sq);

 /* mlx5_flow_flex.c */

@@ -2273,6 +2320,31 @@ struct mlx5_list_entry *mlx5_flex_parser_clone_cb(void *list_ctx,
 void mlx5_flex_parser_clone_free_cb(void *tool_ctx,
 				    struct mlx5_list_entry *entry);

+int
+mlx5_flow_quota_destroy(struct rte_eth_dev *dev);
+int
+mlx5_flow_quota_init(struct rte_eth_dev *dev, uint32_t nb_quotas);
+struct rte_flow_action_handle *
+mlx5_quota_alloc(struct rte_eth_dev *dev, uint32_t queue,
+		 const struct rte_flow_action_quota *conf,
+		 struct mlx5_hw_q_job *job, bool push,
+		 struct rte_flow
[PATCH v2 5/5] mlx5dr: Definer, translate RTE quota item
MLX5 PMD implements QUOTA with Meter object.
PMD Quota action translation implicitly increments
Meter register value after HW assigns it.
Meter register values are:
            HW     QUOTA(HW+1)  QUOTA state
RED         0      1 (01b)      BLOCK
YELLOW      1      2 (10b)      PASS
GREEN       2      3 (11b)      PASS

Quota item checks Meter register bit 1 value to determine state:
            SPEC       MASK
PASS        2 (10b)    2 (10b)
BLOCK       0 (00b)    2 (10b)

Signed-off-by: Gregory Etelson
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 63 +++
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6374f9df33..dc9e50ee0f 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -19,6 +19,9 @@
 #define STE_UDP		0x2
 #define STE_ICMP	0x3

+#define MLX5DR_DEFINER_QUOTA_BLOCK 0
+#define MLX5DR_DEFINER_QUOTA_PASS 2
+
 /* Setter function based on bit offset and mask, for 32bit DW */
 #define _DR_SET_32(p, v, byte_off, bit_off, mask) \
 	do { \
@@ -1247,6 +1250,62 @@ mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }

+static void
+mlx5dr_definer_quota_set(struct mlx5dr_definer_fc *fc,
+			 const void *item_data, uint8_t *tag)
+{
+	/**
+	 * MLX5 PMD implements QUOTA with Meter object.
+	 * PMD Quota action translation implicitly increments
+	 * Meter register value after HW assigns it.
+	 * Meter register values are:
+	 *            HW     QUOTA(HW+1)  QUOTA state
+	 * RED        0      1 (01b)      BLOCK
+	 * YELLOW     1      2 (10b)      PASS
+	 * GREEN      2      3 (11b)      PASS
+	 *
+	 * Quota item checks Meter register bit 1 value to determine state:
+	 *            SPEC       MASK
+	 * PASS       2 (10b)    2 (10b)
+	 * BLOCK      0 (00b)    2 (10b)
+	 *
+	 * item_data is NULL when template quota item is non-masked:
+	 * .. / quota / ..
+	 */
+
+	const struct rte_flow_item_quota *quota = item_data;
+	uint32_t val;
+
+	if (quota && quota->state == RTE_FLOW_QUOTA_STATE_BLOCK)
+		val = MLX5DR_DEFINER_QUOTA_BLOCK;
+	else
+		val = MLX5DR_DEFINER_QUOTA_PASS;
+
+	DR_SET(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static int
+mlx5dr_definer_conv_item_quota(struct mlx5dr_definer_conv_data *cd,
+			       __rte_unused struct rte_flow_item *item,
+			       int item_idx)
+{
+	int mtr_reg = flow_hw_get_reg_id(RTE_FLOW_ITEM_TYPE_METER_COLOR, 0);
+	struct mlx5dr_definer_fc *fc;
+
+	if (mtr_reg < 0) {
+		rte_errno = EINVAL;
+		return rte_errno;
+	}
+
+	fc = mlx5dr_definer_get_register_fc(cd, mtr_reg);
+	if (!fc)
+		return rte_errno;
+
+	fc->tag_set = &mlx5dr_definer_quota_set;
+	fc->item_idx = item_idx;
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
 				  struct rte_flow_item *item,
@@ -1904,6 +1963,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
 			break;
+		case RTE_FLOW_ITEM_TYPE_QUOTA:
+			ret = mlx5dr_definer_conv_item_quota(&cd, items, i);
+			item_flags |= MLX5_FLOW_ITEM_QUOTA;
+			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
 			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
--
2.34.1
Re: [PATCH v9 01/21] net/cpfl: support device initialization
On 3/7/2023 3:03 PM, Ferruh Yigit wrote:
> On 3/7/2023 2:11 PM, Ferruh Yigit wrote:
>> On 3/2/2023 9:20 PM, Mingxia Liu wrote:
>>> Support device init and add the following dev ops:
>>>  - dev_configure
>>>  - dev_close
>>>  - dev_infos_get
>>>  - link_update
>>>  - dev_supported_ptypes_get
>>>
>>> Signed-off-by: Mingxia Liu
>>
>> <...>
>>
>>> +static void
>>> +cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
>>> +{
>>> +	struct idpf_adapter *base = &adapter->base;
>>> +	struct idpf_dma_mem *dma_mem = NULL;
>>> +	struct idpf_hw *hw = &base->hw;
>>> +	struct virtchnl2_event *vc_event;
>>> +	struct idpf_ctlq_msg ctlq_msg;
>>> +	enum idpf_mbx_opc mbx_op;
>>> +	struct idpf_vport *vport;
>>> +	enum virtchnl_ops vc_op;
>>> +	uint16_t pending = 1;
>>> +	int ret;
>>> +
>>> +	while (pending) {
>>> +		ret = idpf_vc_ctlq_recv(hw->arq, &pending, &ctlq_msg);
>>> +		if (ret) {
>>> +			PMD_DRV_LOG(INFO, "Failed to read msg from virtual channel, ret: %d", ret);
>>> +			return;
>>> +		}
>>> +
>>> +		memcpy(base->mbx_resp, ctlq_msg.ctx.indirect.payload->va,
>>> +		       IDPF_DFLT_MBX_BUF_SIZE);
>>> +
>>> +		mbx_op = rte_le_to_cpu_16(ctlq_msg.opcode);
>>> +		vc_op = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
>>> +		base->cmd_retval = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
>>> +
>>> +		switch (mbx_op) {
>>> +		case idpf_mbq_opc_send_msg_to_peer_pf:
>>> +			if (vc_op == VIRTCHNL2_OP_EVENT) {
>>
>>
>> Raslan reported following build error [1], 'VIRTCHNL2_OP_EVENT' is not
>> an element of "enum virtchnl_ops", can you please check?
>>
>>
>> I guess there are a few options, have a new enum for virtchnl2, like
>> "enum virtchnl2_ops" which includes all 'VIRTCHNL2_OP_',
>>
>> OR
>>
>> use 'uint32_t' type (instead of "enum virtchnl_ops") when
>> 'VIRTCHNL2_OP_' opcodes can be used, this seems simpler.
>>
>>
>> BTW, this is the same in the idpf driver.
>>
>>
>> [1]
>> drivers/libtmp_rte_net_cpfl.a.p/net_cpfl_cpfl_ethdev.c.o -c
>> ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c
>> ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c:1118:14: error:
>> comparison of constant 522 with expression of type 'enum virtchnl_ops'
>> is always false [-Werror,-Wtautological-constant-out-of-range-compare]
>>         if (vc_op == VIRTCHNL2_OP_EVENT) {
>>             ~ ^ ~~
>> 1 error generated.
>>
>
> Thinking twice, I am not sure if this is a compiler issue or coding issue,
> many compilers don't complain about the above issue.
>
> As far as I understand C allows assigning unlisted values to enums,
> because underneath it just uses an integer type.
>
> The only caveat I can see is that the integer type used is not fixed;
> technically the compiler can select the type that fits all enum values,
> so for the above enum the compiler could select a char type to store the
> values, and the fixed value 522 being out of the char range may cause an
> issue. In practice I am not sure if compilers select char as the
> underlying type, or if they all just use 'int'.
>

Hi Mingxia, Beilei, Yuying, Qi,

Reminder of this issue.

Build error is observed by clang 3.4.x [1], can you please work on a fix?

[1]
https://godbolt.org/z/zrKz7371b

Thanks,
ferruh
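A standalone reproduction of the constant-out-of-range comparison
discussed in the thread above, using hypothetical stand-ins for the
virtchnl enums (the real definitions live in the idpf/cpfl base code);
the uint32_t variant is the simpler fix suggested in the thread:

#include <stdint.h>

enum demo_ops {			/* hypothetical subset of an ops enum */
	DEMO_OP_VERSION = 1,
	DEMO_OP_GET_CAPS = 100,
};

#define DEMO2_OP_EVENT 522	/* opcode outside the enum's value range */

/* enum-typed operand: clang with
 * -Wtautological-constant-out-of-range-compare may prove the comparison
 * always false if it picked an underlying type too narrow for 522. */
static int
handle_enum(enum demo_ops op)
{
	return op == DEMO2_OP_EVENT;
}

/* plain integer operand: well-defined for any opcode value */
static int
handle_u32(uint32_t op)
{
	return op == DEMO2_OP_EVENT;
}

int
main(void)
{
	return handle_enum(DEMO_OP_VERSION) + handle_u32(DEMO2_OP_EVENT);
}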
Re: ixgbe rxq interrupt not working
Hi Stephen,

Downloaded https://fast.dpdk.org/rel/dpdk-22.11.1.tar.xz, built and
linked with the application. Still don't get rxq interrupts on the
ixgbe ports.

EAL: Detected CPU lcores: 4
EAL: Detected NUMA nodes: 1
EAL: Detected static linkage of DPDK
EAL: Multi-process socket /var/run/dpdk/rte/mp_socket
EAL: Selected IOVA mode 'PA'
EAL: VFIO support initialized
EAL: Using IOMMU type 8 (No-IOMMU)
EAL: Probe PCI driver: net_ixgbe (8086:15e4) device: :02:00.0 (socket -1)
EAL: Probe PCI driver: net_ixgbe (8086:15e4) device: :02:00.1 (socket -1)
EAL: Ignore mapping IO port bar(2)
EAL: Probe PCI driver: net_e1000_igb (8086:1521) device: :04:00.1 (socket -1)

# cat /proc/interrupts | grep vfio
 59:  0      0  6  0  IR-PCI-MSI 1048576-edge  vfio-msix[0](:02:00.0)
 60:  0      6  0  0  IR-PCI-MSI 1050624-edge  vfio-msix[0](:02:00.1)
 61:  0      0  1  0  IR-PCI-MSI 2099200-edge  vfio-msix[0](:04:00.1)
#> the above 3 IRQs are link state change interrupts, verified by
plugging-out and plugging-in the eth cable; the above IRQ counters
increment every time.
 62:  0      0  0  0  IR-PCI-MSI 1050625-edge  vfio-msix[1](:02:00.1)
 63:  0  18200  0  0  IR-PCI-MSI 2099201-edge  vfio-msix[1](:04:00.1)
 64:  0      0  0  0  IR-PCI-MSI 1048577-edge  vfio-msix[1](:02:00.0)
#> the igb port IRQ counter is non-zero (likely to be rxq), whereas the
same for ixgbe is 0.

Thanks & Regards,
Rajasekhar

On Wed, Mar 8, 2023 at 3:12 AM Stephen Hemminger wrote:

> On Wed, 8 Mar 2023 00:22:10 +0530
> Rajasekhar Pulluru wrote:
>
> > Hi Team,
> >
> > Bringing-up dpdk-22.07 on an intel machine with 8 ports, 4 of them driven
> > by igb and the rest of the 4 ports driven by ixgbe.
>
> FYI - 22.07 is not a supported release, only LTS releases like 22.11 are
> supported.
Re: ixgbe rxq interrupt not working
No Honnappa.

Thanks & Regards,
Rajasekhar

On Wed, Mar 8, 2023 at 5:49 AM Honnappa Nagarahalli <
honnappa.nagaraha...@arm.com> wrote:

>
>
> From: Rajasekhar Pulluru
> Sent: Tuesday, March 7, 2023 12:52 PM
> To: dev@dpdk.org
> Subject: ixgbe rxq interrupt not working
>
> Hi Team,
>
> Bringing-up dpdk-22.07 on an intel machine with 8 ports, 4 of them driven
> by igb and the rest of the 4 ports driven by ixgbe.
> [Honnappa] Do you have packets crossing between the 2 drivers?
>
>
> I am following the below sequence to initialize these ports:
>
> dev_conf.intr_conf.lsc = 1; //Enable link state change interrupt
> dev_conf.intr_conf.rxq = 1; //Enable RX Queue Interrupt
> dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE;
> dev_conf.rxmode.offloads = 0;
> dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
> dev_conf.txmode.offloads = 0;
>
> rte_eth_dev_configure
> rte_eth_rx_queue_setup
> rte_eth_tx_queue_setup
> rte_eth_dev_start
> data = port_id << CHAR_BIT | queue_id;
> rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
>                           RTE_INTR_EVENT_ADD, (void *)((uintptr_t)data));
> rte_eth_dev_rx_intr_enable(port_id, queue_id);
>
> And then main loop repeats the below:
>
> rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, timeout /* 200micro-sec */);
> /* ignore return value */
> rte_eth_dev_rx_intr_disable(port_id, queue_id);
> rte_eth_rx_burst(port_id, queue_id, pkts, num_pkts);
> rte_eth_dev_rx_intr_enable(port_id, queue_id);
>
> The code is same for all the ports, igb ports are able to come-up and rx
> packets, where-as the ixgbe ports are not able to rx packets at all.
> cat /proc/interrupts dumps vfio-msix counters for ixgbe as 0, where-as
> it's non-zero for igb.
> If I don't use/enable rxq interrupt for ixgbe (and remove epoll wait,
> interrupt enable/disable from while loop) and simply poll for
> rte_eth_rx_burst in a loop, ixgbe ports are able to rx packets.
>
> What could be wrong here? Appreciate any help.
>
> I would also like to know if there's an asynchronous rxq interrupt
> notification to the application instead of rte_epoll_wait (and sleep).
>
> Thanks & Regards,
> Rajasekhar
RE: [PATCH v2] test: add cryptodev crosscheck suite
Hi Volodymyr, > -Original Message- > From: Volodymyr Fialko > Sent: Thursday 9 February 2023 12:34 > To: dev@dpdk.org; Akhil Goyal ; Fan Zhang > > Cc: jer...@marvell.com; ano...@marvell.com; hemant.agra...@nxp.com; Ji, > Kai ; Power, Ciara ; Volodymyr > Fialko > Subject: [PATCH v2] test: add cryptodev crosscheck suite > > Add a validation test suite that helps in verifying that the output generated > by two different cryptodevs match for a wide range of input parameter > combinations. > > Crypto autotest performs a comprehensive testing of the cryptodev but > since it performs verification by comparing against known vectors, the > extend to which various parameters (like packet size) can be tested is > limited. This test suite attempts to simulate various cases by running same > test case on different cryptodevs and compares the output generated. The > test suite relies on capabilities to determine the combinations of tests to be > attempted. > > A typical use case would be to compare outputs generated from a standard > driver such as openSSL PMD and a new cryptodev PMD. This test suite is to > compliment the testing coverage that crypto autotest provides. > > Currently supported symmetric xforms(cipher, auth, aead) without chaining. > > Example command: > DPDK_TEST=cryptodev_crosscheck ./dpdk-test \ > -a --vdev "crypto_openssl" > > Signed-off-by: Volodymyr Fialko > --- > V2: > - Updated commit message. > > + > +static void > +capabilities_inspect(void) > +{ > + struct rte_cryptodev_sym_capability_idx > cap_indexes[CRYPTO_ALGOS_LEN], *cap_idx; > + struct crypto_testsuite_params *ts_params = &testsuite_params; > + const struct rte_cryptodev_symmetric_capability *sym_capa; > + struct rte_cryptodev_symmetric_capability *common_capa; > + uint32_t algo, i, dev_id, caps_idx; > + > + caps_idx = 0; > + /* Create capability idx for known algorithms*/ > + for (algo = 1; algo <= CRYPTO_AUTH_MAX_IDX; algo++) { > + cap_idx = &cap_indexes[caps_idx++]; > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_AUTH; > + cap_idx->algo.auth = algo; > + } > + for (algo = 1; algo <= CRYPTO_CIPHER_MAX_IDX; algo++) { > + cap_idx = &cap_indexes[caps_idx++]; > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_CIPHER; > + cap_idx->algo.cipher = algo; > + } > + for (algo = 1; algo <= CRYPTO_AEAD_MAX_IDX; algo++) { > + cap_idx = &cap_indexes[caps_idx++]; > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_AEAD; > + cap_idx->algo.aead = algo; > + } > + > + for (caps_idx = 0; caps_idx < CRYPTO_ALGOS_LEN; caps_idx++) { > + /* Gather common capabilities */ > + common_capa = &common_symm_capas[caps_idx]; > + common_capa->xform_type = > RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED; > + for (i = 0; i < ts_params->valid_dev_count; i++) { > + dev_id = ts_params->valid_devs[i]; > + sym_capa = > rte_cryptodev_sym_capability_get(dev_id, > + &cap_indexes[caps_idx]); > + if (sym_capa == NULL) { > + /* Capability not supported by one of devs, > mark and skip */ > + goto next_algo; > + } > + > + if (common_capa->xform_type == > RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED) { > + /* First time initialization, copy data, go to > next device */ > + *common_capa = *sym_capa; > + continue; > + } [CP] This function - from what I understand after review, is looping through all algorithms in DPDK crypto, and then checking if each is supported by each device? Could we instead just take the capabilities list from one device as the starting point - That will be the max list of capabilities, they will only get knocked out if not supported on the 2nd/3rd device etc. 
Some trials I did with this patch showed some issues. 1. For QAT + AESNI_MB I get all skipped tests. I would have expected some common algs here.
+ ------------------------------------------------------------ +
+ Test Suite Summary : Crosscheck Unit Test Suite              +
+ ------------------------------------------------------------ +
+ Algo AUTH 'null'         : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'aes-cbc-mac'  : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'aes-cmac'     : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'aes-gmac'     : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'aes-xcbc-mac' : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'kasumi-f9'    : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'md5'          : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
+ Algo AUTH 'md5-hmac'     : 0/0 passed, 0/0 skipped,
Re: ixgbe rxq interrupt not working
On Wed, 8 Mar 2023 22:54:12 +0530 Rajasekhar Pulluru wrote: > No Honnappa. > > Thanks & Regards, > Rajasekhar > > On Wed, Mar 8, 2023 at 5:49 AM Honnappa Nagarahalli < > honnappa.nagaraha...@arm.com> wrote: > > > > > > > From: Rajasekhar Pulluru > > Sent: Tuesday, March 7, 2023 12:52 PM > > To: dev@dpdk.org > > Subject: ixgbe rxq interrupt not working > > > > Hi Team, > > > > Bringing up dpdk-22.07 on an Intel machine with 8 ports, 4 of them driven > > by igb and the rest of the 4 ports driven by ixgbe. > > [Honnappa] Do you have packets crossing between the 2 drivers? > > > > > > I am following the below sequence to initialize these ports: > > > > dev_conf.intr_conf.lsc = 1; //Enable link state change interrupt > > dev_conf.intr_conf.rxq = 1; //Enable RX Queue Interrupt > > dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; > > dev_conf.rxmode.offloads = 0; > > dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE; > > dev_conf.txmode.offloads = 0; > > > > rte_eth_dev_configure > > rte_eth_rx_queue_setup > > rte_eth_tx_queue_setup > > rte_eth_dev_start > > data = port_id << CHAR_BIT | queue_id; > > rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD, > > RTE_INTR_EVENT_ADD, (void *)((uintptr_t)data)); > > rte_eth_dev_rx_intr_enable(port_id, queue_id); > > > > And then the main loop repeats the below: > > > > rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, timeout /* 200 micro-sec > > */); /* ignore return value */ > > rte_eth_dev_rx_intr_disable(port_id, queue_id); > > rte_eth_rx_burst(port_id, queue_id, pkts, num_pkts); > > rte_eth_dev_rx_intr_enable(port_id, queue_id); > > > > The code is the same for all the ports; the igb ports are able to come up and rx > > packets, whereas the ixgbe ports are not able to rx packets at all. > > cat /proc/interrupts dumps vfio-msix counters for ixgbe as 0, whereas > > they are non-zero for igb. > > If I don't use/enable the rxq interrupt for ixgbe (and remove the epoll wait and > > interrupt enable/disable from the while loop) and simply poll > > rte_eth_rx_burst in a loop, the ixgbe ports are able to rx packets. > > > > What could be wrong here? Appreciate any help. > > > > I would also like to know if there's an asynchronous rxq interrupt > > notification to the application instead of rte_epoll_wait (and sleep). > > > > Thanks & Regards, > > Rajasekhar > > Does the device work as expected when not used with DPDK? I.e. does the kernel driver handle it correctly? Also check the kernel dmesg log for any relevant info. There may be VFIO or other overlap involved.
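For reference, a minimal consolidated sketch of the interrupt-driven RX loop described above - illustrative only, not the reporter's actual code. One detail worth checking: the rte_epoll_wait() timeout follows epoll_wait(2) semantics and is in milliseconds, not microseconds as the inline comment above suggests.

#include <limits.h>
#include <rte_ethdev.h>
#include <rte_epoll.h>
#include <rte_mbuf.h>

/* Hypothetical helper; the port/queue are assumed to be configured and
 * started as in the sequence quoted above. */
static void
rx_intr_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;
	struct rte_mbuf *pkts[32];
	uint64_t data = (uint64_t)port_id << CHAR_BIT | queue_id;
	uint16_t nb;

	/* Register the RX queue interrupt with the per-thread epoll fd. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
			RTE_INTR_EVENT_ADD, (void *)(uintptr_t)data);
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	for (;;) {
		/* Timeout is in milliseconds; return value ignored as above. */
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 200);
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
		do {
			nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
			/* ... process nb packets ... */
		} while (nb > 0);
		rte_eth_dev_rx_intr_enable(port_id, queue_id);
	}
}

Draining the queue until rte_eth_rx_burst() returns 0 before re-arming avoids losing packets that arrive between the last burst and the re-enable.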
RE: [PATCH v2] test: add cryptodev crosscheck suite
> -----Original Message----- > From: Power, Ciara > Sent: Wednesday 8 March 2023 17:31 > To: Volodymyr Fialko ; dev@dpdk.org; Akhil Goyal > ; Fan Zhang > Cc: jer...@marvell.com; ano...@marvell.com; hemant.agra...@nxp.com; Ji, > Kai > Subject: RE: [PATCH v2] test: add cryptodev crosscheck suite > > Hi Volodymyr, > > > > -----Original Message----- > > From: Volodymyr Fialko > > Sent: Thursday 9 February 2023 12:34 > > To: dev@dpdk.org; Akhil Goyal ; Fan Zhang > > > > Cc: jer...@marvell.com; ano...@marvell.com; hemant.agra...@nxp.com; > > Ji, Kai ; Power, Ciara ; > > Volodymyr Fialko > > Subject: [PATCH v2] test: add cryptodev crosscheck suite > > > > Add a validation test suite that helps in verifying that the output > > generated by two different cryptodevs matches for a wide range of input > > parameter combinations. > > > > Crypto autotest performs comprehensive testing of the cryptodev, but > > since it performs verification by comparing against known vectors, the > > extent to which various parameters (like packet size) can be tested is > > limited. This test suite attempts to simulate various cases by running > > the same test case on different cryptodevs and comparing the output > > generated. The test suite relies on capabilities to determine the > > combinations of tests to be attempted. > > > > A typical use case would be to compare outputs generated from a > > standard driver such as the openSSL PMD and a new cryptodev PMD. This test > > suite is to complement the testing coverage that crypto autotest provides. > > > > Currently supported: symmetric xforms (cipher, auth, aead) without > chaining. > > > > Example command: > > DPDK_TEST=cryptodev_crosscheck ./dpdk-test \ > > -a --vdev "crypto_openssl" > > > > Signed-off-by: Volodymyr Fialko > > --- > > V2: > > - Updated commit message.
> > > > > + > > +static void > > +capabilities_inspect(void) > > +{ > > + struct rte_cryptodev_sym_capability_idx > > cap_indexes[CRYPTO_ALGOS_LEN], *cap_idx; > > + struct crypto_testsuite_params *ts_params = &testsuite_params; > > + const struct rte_cryptodev_symmetric_capability *sym_capa; > > + struct rte_cryptodev_symmetric_capability *common_capa; > > + uint32_t algo, i, dev_id, caps_idx; > > + > > + caps_idx = 0; > > + /* Create capability idx for known algorithms */ > > + for (algo = 1; algo <= CRYPTO_AUTH_MAX_IDX; algo++) { > > + cap_idx = &cap_indexes[caps_idx++]; > > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_AUTH; > > + cap_idx->algo.auth = algo; > > + } > > + for (algo = 1; algo <= CRYPTO_CIPHER_MAX_IDX; algo++) { > > + cap_idx = &cap_indexes[caps_idx++]; > > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_CIPHER; > > + cap_idx->algo.cipher = algo; > > + } > > + for (algo = 1; algo <= CRYPTO_AEAD_MAX_IDX; algo++) { > > + cap_idx = &cap_indexes[caps_idx++]; > > + cap_idx->type = RTE_CRYPTO_SYM_XFORM_AEAD; > > + cap_idx->algo.aead = algo; > > + } > > + > > + for (caps_idx = 0; caps_idx < CRYPTO_ALGOS_LEN; caps_idx++) { > > + /* Gather common capabilities */ > > + common_capa = &common_symm_capas[caps_idx]; > > + common_capa->xform_type = > > RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED; > > + for (i = 0; i < ts_params->valid_dev_count; i++) { > > + dev_id = ts_params->valid_devs[i]; > > + sym_capa = > > rte_cryptodev_sym_capability_get(dev_id, > > + &cap_indexes[caps_idx]); > > + if (sym_capa == NULL) { > > + /* Capability not supported by one of devs, > > mark and skip */ > > + goto next_algo; > > + } > > + > > + if (common_capa->xform_type == > > RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED) { > > + /* First time initialization, copy data, go to > > next device */ > > + *common_capa = *sym_capa; > > + continue; > > + } > [CP] > > From what I understand after review, this function loops through all > algorithms in DPDK crypto and then checks whether each one is supported by each > device? > Could we instead just take the capabilities list from one device as the starting > point? That would be the maximal list of capabilities; entries would only get knocked out > if not supported on the 2nd/3rd device, etc. > > > > > Some trials I did with this patch showed some issues. > > 1. For QAT + AESNI_MB I get all skipped tests. I would have expected some common algs here.
> + ------------------------------------------------------------ +
> + Test Suite Summary : Crosscheck Unit Test Suite              +
> + ------------------------------------------------------------ +
> + Algo AUTH 'null'        : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
> + Algo AUTH 'aes-cbc-mac' : 0/0 passed, 0/0 skipped, 0/0 failed, 0/0 unsupported
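A rough sketch of the suggested alternative - seed the common set from the first device's capability list and knock entries out per device. The types and the dev_supports() helper are hypothetical placeholders, not the DPDK API:

#include <stdint.h>

struct capa {
	uint32_t xform_type;
	uint32_t algo;
};

/* caps[0..*n_caps) starts as device 0's full capability list (the maximal
 * set); every later device can only remove entries, never add them. */
static void
intersect_caps(struct capa *caps, uint32_t *n_caps, uint32_t n_devs,
	       int (*dev_supports)(uint32_t dev_id, const struct capa *c))
{
	uint32_t dev, i, kept;

	for (dev = 1; dev < n_devs; dev++) {
		kept = 0;
		for (i = 0; i < *n_caps; i++)
			if (dev_supports(dev, &caps[i]))
				caps[kept++] = caps[i];
		*n_caps = kept;
	}
}

This visits only capabilities the first device actually advertises, instead of iterating over every algorithm identifier DPDK defines.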
Re: release candidate 23.03-rc1
Hi,

IBM - Power Systems
DPDK v23.03-rc1-11

* Basic PF on Mellanox: No new issues or regressions were seen.
* Performance: not tested.

Systems tested:
- IBM Power9 PowerNV 9006-22P
  OS: RHEL 8.3
  GCC: version 8.3.1 20191121 (Red Hat 8.3.1-5)
  NICs:
  - Mellanox Technologies MT28800 Family [ConnectX-5 Ex]
  - firmware version: 16.29.1017
  - MLNX_OFED_LINUX-5.2-1.0.4.1 (OFED-5.2-1.0.4)
- PowerVM LPARs on IBM Power10 CHRP IBM,9105-22A
  OS:
  - RHEL 9.1 kernel 5.14.0-162.6.1.el9_1.ppc64le
  - RHEL 8.6 kernel 4.18.0-372.9.1.el8.ppc64le
  GCC:
  - gcc version 11.3.1 20220421 (Red Hat 11.3.1-2)
  - gcc version 8.5.0 20210514 (Red Hat 8.5.0-10)
  NICs:
  - Mellanox Technologies MT2894 Family [ConnectX-6 Lx]
  - firmware version: 26.36.1010
  - MLNX_OFED_LINUX-5.9-0.5.6.1

Regards,
Thinh Tran

On 2/20/2023 10:48 AM, Thomas Monjalon wrote: A new DPDK release candidate is ready for testing: https://git.dpdk.org/dpdk/tag/?id=v23.03-rc1 There are 566 new patches in this snapshot. Release notes: https://doc.dpdk.org/guides/rel_notes/release_23_03.html Highlights of 23.03-rc1:
- lock annotations
- ARM power management monitor/wakeup
- queue mapping of aggregated ports
- flow quota
- more flow matching (ICMPv6, IPv6 routing extension)
- more flow actions (flex modify, congestion management)
- SHAKE hash algorithm for crypto
- LZ4 algorithm for compression
- more telemetry endpoints
- more tracepoints
Please test and report issues on bugs.dpdk.org. DPDK 23.03-rc2 is expected in two weeks. Thank you everyone
Re: [PATCH v3 2/2] eal: add option to put timestamp on console output
On 2023/3/8 10:03, Stephen Hemminger wrote: > On Wed, 8 Mar 2023 08:36:48 +0800 > fengchengwen wrote: > >> On 2023/3/8 0:06, Stephen Hemminger wrote: >>> On Tue, 7 Mar 2023 17:35:32 +0800 >>> fengchengwen wrote: >>> The syslog will add a timestamp, but the syslog backend will re-write the timestamp, so in the end you can't find the real timestamp of this log print. Sometimes it is necessary to get the real log time. PS: we found it in our test environment because an RR-scheduled task hung too long (a similar question was also found: https://bugzilla.redhat.com/show_bug.cgi?id=1855447). So I suggest adding the timestamp to the syslog string as well, and not converting to monotonic - just print in the normal format (just like syslog). >>> >>> >>> Are you using systemd? >> >> Yes > > There is a redhat bug about this: > https://bugzilla.redhat.com/show_bug.cgi?id=991678 > >>> Never, never configure a DPDK application with real-time process priority. >>> Polling model and RT don't mix. >> >> Maybe we should document that? > > Part of a previous discussion here: > https://mails.dpdk.org/archives/dev/2021-April/203778.html > > In my experience, running DPDK on isolated threads (cgroup or scheduler isolation) > combined with remapping interrupts gives the best response without lockup. > > I.e. don't depend on the scheduler to do the right thing. Instead ensure that > each thread runs on a dedicated CPU. Got it, thanks.
Re: [PATCH 1/5] ethdev: fix race-condition of proactive error handling mode
On 2023/3/8 9:09, Honnappa Nagarahalli wrote: > > >>> > > Is there any reason not to design this in the same way as 'rte_eth_dev_reset'? Why does the PMD have to recover by itself? I suppose it is a question for the authors of the original patch... >>> Appreciate if the authors could comment on this. >> >> The main cause is the hardware implementation limit; I will try to explain >> from the hns3 PMD's point of view. >> For a global reset, all the functions need to respond within a certain period of >> time, otherwise the reset will fail, and the reset also requires a few steps (all of which >> may take a long time). >> >> With multiple functions in one DPDK process, when a global reset is triggered, >> rte_eth_dev_reset will not cover this scene: >> 1. each port will report RTE_ETH_EVENT_INTR_RESET in the interrupt thread. >> 2. the application callback is then invoked, but because it runs in the same thread and each >> port's recovery takes a long time, later ports will fail to reset. > If the design were to introduce RTE_ETH_EVENT_INTR_RECOVER and > rte_eth_dev_recover, what problems do you see? I see no difference between the proposed 'RTE_ETH_EVENT_INTR_RECOVER and rte_eth_dev_recover' and the RTE_ETH_EVENT_INTR_RESET mechanism. Could you detail more? > >> >>> > We could have a similar API 'rte_eth_dev_recover' to do the recovery functionality. I suppose such an approach is also possible. Personally I am fine with both ways: either the existing one or what you propose, as long as we fix the existing race-condition. What is good with what you suggest is that this way we probably don't need to worry about how to allow the user to enable/disable auto-recovery inside the PMD. Konstantin >>>
RE: [PATCH v9 01/21] net/cpfl: support device initialization
> -----Original Message----- > From: Ferruh Yigit > Sent: Thursday, March 9, 2023 1:04 AM > To: Liu, Mingxia ; Xing, Beilei ; > Zhang, Yuying ; Zhang, Qi Z > Cc: dev@dpdk.org; Stephen Hemminger ; > Richardson, Bruce ; Raslan Darawsheh > > Subject: Re: [PATCH v9 01/21] net/cpfl: support device initialization > > On 3/7/2023 3:03 PM, Ferruh Yigit wrote: > > On 3/7/2023 2:11 PM, Ferruh Yigit wrote: > >> On 3/2/2023 9:20 PM, Mingxia Liu wrote: > >>> Support device init and add the following dev ops: > >>> - dev_configure > >>> - dev_close > >>> - dev_infos_get > >>> - link_update > >>> - dev_supported_ptypes_get > >>> > >>> Signed-off-by: Mingxia Liu > >> > >> <...> > >> > >>> +static void > >>> +cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) { > >>> + struct idpf_adapter *base = &adapter->base; > >>> + struct idpf_dma_mem *dma_mem = NULL; > >>> + struct idpf_hw *hw = &base->hw; > >>> + struct virtchnl2_event *vc_event; > >>> + struct idpf_ctlq_msg ctlq_msg; > >>> + enum idpf_mbx_opc mbx_op; > >>> + struct idpf_vport *vport; > >>> + enum virtchnl_ops vc_op; > >>> + uint16_t pending = 1; > >>> + int ret; > >>> + > >>> + while (pending) { > >>> + ret = idpf_vc_ctlq_recv(hw->arq, &pending, &ctlq_msg); > >>> + if (ret) { > >>> + PMD_DRV_LOG(INFO, "Failed to read msg from virtual > channel, ret: %d", ret); > >>> + return; > >>> + } > >>> + > >>> + memcpy(base->mbx_resp, ctlq_msg.ctx.indirect.payload->va, > >>> +IDPF_DFLT_MBX_BUF_SIZE); > >>> + > >>> + mbx_op = rte_le_to_cpu_16(ctlq_msg.opcode); > >>> + vc_op = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode); > >>> + base->cmd_retval = > >>> +rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval); > >>> + > >>> + switch (mbx_op) { > >>> + case idpf_mbq_opc_send_msg_to_peer_pf: > >>> + if (vc_op == VIRTCHNL2_OP_EVENT) { > >> > >> > >> Raslan reported the following build error [1], 'VIRTCHNL2_OP_EVENT' is > >> not an element of "enum virtchnl_ops", can you please check? > >> > >> > >> I guess there are a few options: have a new enum for virtchnl2, like > >> "enum virtchnl2_ops", which would include all 'VIRTCHNL2_OP_' opcodes, > >> > >> OR > >> > >> use the 'uint32_t' type (instead of "enum virtchnl_ops") when > >> 'VIRTCHNL2_OP_' opcodes can be used; this seems simpler. > >> > >> > >> BTW, this is the same in the idpf driver. > >> > >> > >> [1] > >> drivers/libtmp_rte_net_cpfl.a.p/net_cpfl_cpfl_ethdev.c.o -c > >> ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c > >> ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c:1118:14: error: > >> comparison of constant 522 with expression of type 'enum virtchnl_ops' > >> is always false [-Werror,-Wtautological-constant-out-of-range-compare] > >> if (vc_op == VIRTCHNL2_OP_EVENT) { > >> ~ ^ ~~ > >> 1 error generated. > >> > > > > Thinking twice, I am not sure if this is a compiler issue or a coding > > issue; many compilers don't complain about the above. > > > > As far as I understand, C allows assigning unlisted values to enums, > > because underneath it just uses an integer type. > > > > The only caveat I can see is that the integer type used is not fixed; > > technically the compiler can select the type that fits all enum values, so > > for the above enum the compiler could select a char type to store the values, > > and the fixed value 522 being out of the char range may cause an issue. But > > in practice I am not sure if compilers are selecting char as the > > underlying type, or if they all just use 'int'. > > > > Hi Mingxia, Beilei, Yuying, Qi, > > Reminder of this issue. > > A build error is observed with clang 3.4.x [1], can you please work on a fix?
> > > [1] https://godbolt.org/z/zrKz7371b > > Thanks, > Ferruh [Liu, Mingxia] Sorry for the late reply, I just came back from sl. I'll check the issue as soon as possible. Thanks!
RE: [PATCH v9 01/21] net/cpfl: support device initialization
> -----Original Message----- > From: Ferruh Yigit > Sent: Tuesday, March 7, 2023 11:03 PM > To: Liu, Mingxia ; Xing, Beilei ; > Zhang, Yuying ; Raslan Darawsheh > > Cc: dev@dpdk.org; Stephen Hemminger ; > Richardson, Bruce ; Zhang, Qi Z > > Subject: Re: [PATCH v9 01/21] net/cpfl: support device initialization > > On 3/7/2023 2:11 PM, Ferruh Yigit wrote: > > On 3/2/2023 9:20 PM, Mingxia Liu wrote: > >> Support device init and add the following dev ops: > >> - dev_configure > >> - dev_close > >> - dev_infos_get > >> - link_update > >> - dev_supported_ptypes_get > >> > >> Signed-off-by: Mingxia Liu > > > > <...> > > > >> +static void > >> +cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) { > >> + struct idpf_adapter *base = &adapter->base; > >> + struct idpf_dma_mem *dma_mem = NULL; > >> + struct idpf_hw *hw = &base->hw; > >> + struct virtchnl2_event *vc_event; > >> + struct idpf_ctlq_msg ctlq_msg; > >> + enum idpf_mbx_opc mbx_op; > >> + struct idpf_vport *vport; > >> + enum virtchnl_ops vc_op; > >> + uint16_t pending = 1; > >> + int ret; > >> + > >> + while (pending) { > >> + ret = idpf_vc_ctlq_recv(hw->arq, &pending, &ctlq_msg); > >> + if (ret) { > >> + PMD_DRV_LOG(INFO, "Failed to read msg from virtual > channel, ret: %d", ret); > >> + return; > >> + } > >> + > >> + memcpy(base->mbx_resp, ctlq_msg.ctx.indirect.payload->va, > >> + IDPF_DFLT_MBX_BUF_SIZE); > >> + > >> + mbx_op = rte_le_to_cpu_16(ctlq_msg.opcode); > >> + vc_op = rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode); > >> + base->cmd_retval = > >> +rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval); > >> + > >> + switch (mbx_op) { > >> + case idpf_mbq_opc_send_msg_to_peer_pf: > >> + if (vc_op == VIRTCHNL2_OP_EVENT) { > > > > > > Raslan reported the following build error [1], 'VIRTCHNL2_OP_EVENT' is not > > an element of "enum virtchnl_ops", can you please check? > > > > > > I guess there are a few options: have a new enum for virtchnl2, like > > "enum virtchnl2_ops", which would include all 'VIRTCHNL2_OP_' opcodes, > > > > OR > > > > use the 'uint32_t' type (instead of "enum virtchnl_ops") when > > 'VIRTCHNL2_OP_' opcodes can be used; this seems simpler. > > > > > > BTW, this is the same in the idpf driver. > > > > > > [1] > > drivers/libtmp_rte_net_cpfl.a.p/net_cpfl_cpfl_ethdev.c.o -c > > ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c > > ../../root/dpdk/drivers/net/cpfl/cpfl_ethdev.c:1118:14: error: > > comparison of constant 522 with expression of type 'enum virtchnl_ops' > > is always false [-Werror,-Wtautological-constant-out-of-range-compare] > > if (vc_op == VIRTCHNL2_OP_EVENT) { > > ~ ^ ~~ > > 1 error generated. > > > > Thinking twice, I am not sure if this is a compiler issue or a coding issue; many > compilers don't complain about the above. > > As far as I understand, C allows assigning unlisted values to enums, because > underneath it just uses an integer type. > > The only caveat I can see is that the integer type used is not fixed; technically the compiler > can select the type that fits all enum values, so for the above enum the compiler could > select a char type to store the values, and the fixed value 522 being out of the char > range may cause an issue. But in practice I am not sure if compilers are selecting > char as the underlying type, or if they all just use 'int'. [Liu, Mingxia] By checking the code, we shouldn't compare an enum virtchnl_ops variable with VIRTCHNL2_OP_EVENT, as VIRTCHNL2_OP_EVENT is not included in enum virtchnl_ops. And the cpfl/idpf PMDs use virtual msg opcodes prefixed with virtchnl2 or VIRTCHNL2. I'll send a patch to fix this issue.
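A self-contained illustration of the warning under discussion; the names are stand-ins (OP_EVENT_VAL plays the role of VIRTCHNL2_OP_EVENT = 522, which is outside enum virtchnl_ops):

#include <stdint.h>

enum ops { OP_A = 1, OP_B = 2 };	/* stand-in for enum virtchnl_ops */
#define OP_EVENT_VAL 522		/* stand-in for VIRTCHNL2_OP_EVENT */

int
is_event_enum(enum ops op)
{
	/* clang's -Wtautological-constant-out-of-range-compare fires here:
	 * 522 is outside the value range clang computes for 'enum ops',
	 * so it considers the comparison always false. */
	return op == OP_EVENT_VAL;
}

int
is_event_u32(uint32_t op)
{
	/* The fix taken in the patches below: a fixed-width integer can
	 * represent 522, so there is nothing to warn about. */
	return op == OP_EVENT_VAL;
}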
Re: [PATCH v2 1/2] build: clarify configuration without IOVA field in mbuf
On 2023/3/7 0:13, Thomas Monjalon wrote: > The impact of the option "enable_iova_as_pa" is explained for users. > > Also the code flag "RTE_IOVA_AS_PA" is renamed as "RTE_IOVA_IN_MBUF" > in order to be more accurate (IOVA mode is decided at runtime), > and more readable in the code. > > Similarly the drivers are using the variable "require_iova_in_mbuf" > instead of "pmd_supports_disable_iova_as_pa" with an opposite meaning. > By default, it is assumed that drivers require the IOVA field in mbuf. > The drivers which support removing this field have to declare themselves. > > If the option "enable_iova_as_pa" is disabled, the unsupported drivers > will be listed with the new reason text "requires IOVA in mbuf". > > Suggested-by: Bruce Richardson > Signed-off-by: Thomas Monjalon > --- ... > compile_time_cpuflags = [] > subdir(arch_subdir) > diff --git a/doc/guides/rel_notes/release_22_11.rst > b/doc/guides/rel_notes/release_22_11.rst > index 91414573bd..c67c2823a2 100644 > --- a/doc/guides/rel_notes/release_22_11.rst > +++ b/doc/guides/rel_notes/release_22_11.rst > @@ -504,7 +504,7 @@ ABI Changes >``rte-worker-`` so that DPDK can accommodate lcores higher than > 99. > > * mbuf: Replaced ``buf_iova`` field with ``next`` field and added a new field > - ``dynfield2`` at its place in second cacheline if ``RTE_IOVA_AS_PA`` is 0. > + ``dynfield2`` at its place in second cacheline if ``RTE_IOVA_IN_MBUF`` is > 0. This should be added to the 23.03 release notes instead; the original 22.11 release still has the RTE_IOVA_AS_PA definition. ... > diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build > index e1a5afa2ec..743fae9db7 100644 > --- a/drivers/net/hns3/meson.build > +++ b/drivers/net/hns3/meson.build > @@ -13,9 +13,7 @@ if arch_subdir != 'x86' and arch_subdir != 'arm' or not > dpdk_conf.get('RTE_ARCH_ > subdir_done() > endif > > -if dpdk_conf.get('RTE_IOVA_AS_PA') == 0 > -build = false > -reason = 'driver does not support disabling IOVA as PA mode' > +if not get_option('enable_iova_as_pa') > subdir_done() > endif I suggest keeping the original form and just replacing RTE_IOVA_AS_PA with RTE_IOVA_IN_MBUF:
if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0
    subdir_done()
endif
Meson 0.63.0 already supports deprecating an option in favor of a new one. When updating to that meson version, the drivers' meson.build files will then not need to be modified. > > diff --git a/drivers/net/ice/ice_rxtx_common_avx.h > b/drivers/net/ice/ice_rxtx_common_avx.h > index e69e23997f..dacb87dcb0 100644 ...
RE: [PATCH v5] enhance NUMA affinity heuristic
> -----Original Message----- > From: Thomas Monjalon > Sent: March 3, 2023 22:07 > To: Burakov, Anatoly ; You, KaisenX > > Cc: dev@dpdk.org; Zhou, YidingX ; > david.march...@redhat.com; Matz, Olivier ; > ferruh.yi...@amd.com; zhou...@loongson.cn; sta...@dpdk.org; > Richardson, Bruce ; jer...@marvell.com > Subject: Re: [PATCH v5] enhance NUMA affinity heuristic > > I'm not comfortable with this patch. > > First, there is no comment in the code which helps to understand the logic. > Second, I'm afraid changing the value of the per-core variable _socket_id > may have an impact on some applications. > Thank you for your reply. First, about comments: I can submit a new patch to add comments to help understanding. Second, if you do not change the value of the per-lcore variable _socket_id, then in /lib/eal/common/malloc_heap.c:

malloc_get_numa_socket(void)
{
	const struct internal_config *conf = eal_get_internal_configuration();
	unsigned int socket_id = rte_socket_id(); /* the return value of rte_socket_id() is 1 */
	unsigned int idx;

	if (socket_id != (unsigned int)SOCKET_ID_ANY)
		return socket_id; /* so it returns here */

the function returns here, i.e. it returns the socket_id of a node on which no memory was allocated. If you have a better solution, I can modify it. > 16/02/2023 03:50, You, KaisenX: > > From: Burakov, Anatoly > > > On 2/1/2023 12:20 PM, Kaisen You wrote: > > > > Trying to allocate memory on the first detected numa node has less > > > > chance to find some memory actually available rather than on the > > > > main lcore numa node (especially when the DPDK application is > > > > started only on one numa node). > > > > > > > > Fixes: 705356f0811f ("eal: simplify control thread creation") > > > > Fixes: bb0bd346d5c1 ("eal: suggest using --lcores option") > > > > Cc: sta...@dpdk.org > > > > > > > > Signed-off-by: David Marchand > > > > Signed-off-by: Kaisen You > > > > --- > > > > Changes since v4: > > > > - mod the patch title, > > > > > > > > Changes since v3: > > > > - add the assignment of socket_id in thread initialization, > > > > > > > > Changes since v2: > > > > - add uncommitted local change and fix compilation, > > > > > > > > Changes since v1: > > > > - accommodate configurations with main lcore running on multiple > > > >physical cores belonging to different numa, > > > > --- > > > > lib/eal/common/eal_common_thread.c | 1 + > > > > lib/eal/common/malloc_heap.c | 4 > > > > 2 files changed, 5 insertions(+) > > > > > > > > diff --git a/lib/eal/common/eal_common_thread.c > > > > b/lib/eal/common/eal_common_thread.c > > > > index 38d83a6885..21bff971f8 100644 > > > > --- a/lib/eal/common/eal_common_thread.c > > > > +++ b/lib/eal/common/eal_common_thread.c > > > > @@ -251,6 +251,7 @@ static void *ctrl_thread_init(void *arg) > > > > void *routine_arg = params->arg; > > > > > > > > __rte_thread_init(rte_lcore_id(), cpuset); > > > > + RTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY; > > > > params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), > > > cpuset); > > > > if (params->ret != 0) { > > > > __atomic_store_n(&params->ctrl_thread_status, > > > > diff --git a/lib/eal/common/malloc_heap.c > > > > b/lib/eal/common/malloc_heap.c index d7c410b786..3ee19aee15 100644 > > > > --- a/lib/eal/common/malloc_heap.c > > > > +++ b/lib/eal/common/malloc_heap.c > > > > @@ -717,6 +717,10 @@ malloc_get_numa_socket(void) > > > > return socket_id; > > > > } > > > > > > > > + socket_id = rte_lcore_to_socket_id(rte_get_main_lcore()); > > > > + if (socket_id != (unsigned int)SOCKET_ID_ANY) > > > > + return socket_id; > > > > + > > > > return 
rte_socket_id_by_idx(0); > > > > } > > > > > > > I may be lacking context, but I don't quite get the suggested change. > > > From what I understand, the original has to do with assigning lcore > > > cpusets in such a way that an lcore ends up having two socket ID's > > > (because it's been assigned to CPUs on different sockets). Why is this allowed in the first place? > > > It seems like a user error to me, as it breaks many of the > > > fundamental assumptions DPDK makes. > > > > > In a dual socket system, if all used cores are in socket 1 and the NIC > > is in socket 1, no memory is allocated for socket 0. This is to optimize > memory consumption. > > > > I agree with you. If the startup parameters can ensure that both > > sockets allocate memory, there will be no problem. > > However, due to the different CPU topologies of different systems, it > > is difficult for users to ensure that the startup parameters contain two CPU > nodes. > > > > > I'm fine with using the main lcore socket for control threads, I just > > > don't think the `socket_id != SOCKET_ID_ANY` thing should be checked > > > here, because it apparently tries to compensate for a problem with > > > cpuset of the main thread, whic
Re: [PATCH 1/5] ethdev: fix race-condition of proactive error handling mode
On Tue, Mar 7, 2023 at 4:40 AM Konstantin Ananyev wrote: > > > > > >>> In the proactive error handling mode, the PMD will set the data > > >>> path > > >>> pointers to dummy functions and then try recovery; in this > > >>> period the > > >>> application may still be invoking data path APIs. This will > > >>> introduce a > > >>> race-condition with the data path which may lead to a crash [1]. > > >>> > > >>> Although the PMD added a delay after setting the data path pointers > > >>> to cover > > >>> the above race-condition, it reduces the probability, but it > > >>> doesn't > > >>> solve the problem. > > >>> > > >>> To solve the race-condition problem fundamentally, the following > > >>> requirements are added: > > >>> 1. The PMD should set the data path pointers to dummy functions > > >>> after > > >>> reporting the RTE_ETH_EVENT_ERR_RECOVERING event. > > >>> 2. The application should stop data path API invocation when > > >>> processing > > >>> the RTE_ETH_EVENT_ERR_RECOVERING event. > > >>> 3. The PMD should set the data path pointers to valid functions > > >>> before > > >>> reporting the RTE_ETH_EVENT_RECOVERY_SUCCESS event. > > >>> 4. The application should re-enable data path API invocation when > > >>> processing > > >>> the RTE_ETH_EVENT_RECOVERY_SUCCESS event. > > >>> > > > > > > How is this solving the race-condition - by pushing the responsibility > > > to > > > stop the data path to the application? > > > > Exactly, it becomes the application's responsibility to make sure > > the data path is > > stopped/suspended before recovery continues. > > > > >>> > > >>> From the documentation of the feature: > > >>> > > >>> `` > > >>> Because the PMD recovers automatically, > > >>> the application can only sense that the data flow is disconnected > > >>> for a > > >>> while and the control API returns an error in this period. > > >>> > > >>> In order to sense the error happening/recovering, as well as to > > >>> restore > > >>> some additional configuration, three events are available: > > >>> `` > > >>> > > >>> It looks like the initial design is to use events mainly to inform the > > >>> application > > >>> about what happened, and mainly for re-configuration. > > >>> > > >>> Although I don't disagree with involving the application, I am not > > >>> sure > > >>> that is part of the current design. > > >> > > >> I thought we all agreed that the initial design contains some fallacies > > >> that > > >> need to be fixed, no? > > >> The statement that with the current rte_ethdev design error recovery can be > > >> done > > >> without interaction with the app (to stop/suspend the data/control path) > > >> is the main one, I think. > > >> It needs some interaction with the app layer, one way or another. > > >> > > > > > > What if the application is not interested in recovery modes at all > > > and has not > > > registered any callback for the recovery? > > > > > > Are you saying there is no way for the application to disable > > automatic recovery in the PMD if it is not interested > > (or can't fulfill the prerequisites for it)? > > If so, then yes, it is a problem and we need to fix it. > > I assumed that such a mechanism to disable unwanted events already > > exists, > > but I can't find anything. > > I wonder what would be the easiest way here - can the PMD make a decision > > based on the callback return value, or do we need a new API to > > enable/disable callbacks, or ...? > > > > > > >>> > > >>> As far as I can see automatic recovery is not configurable by the app. 
> > >>> > > >>> But that is not all: the PMD sends events to the application, but the PMD can't > > >>> know > > >>> if the application is handling them or not, so with the current design the PMD > > >>> can't > > >>> rely on the app. > > >> > > >> Well, the PMD invokes a user-provided callback. > > >> One way to fix that problem - if there is no callback provided, > > >> or the callback returns an error code - the PMD can assume that recovery > > >> should not be done. > > >> That is probably not the best design choice, but at least it will > > >> allow > > >> us to fix the problem without too many changes or introducing a new API. > > >> That could be sort of a 'quick fix'. > > >> In the meanwhile we can think about a new/better approach for that. > > >> > > > > > > -rc2 for 23.03 is a few days away. > > > > > > What do you think about having the 'quick fix' as modifying how the driver updates
> > > burst ops to prevent
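To make requirements 2 and 4 concrete, a minimal sketch of an application-side event callback; the dp_enabled gate is illustrative and the draining strategy is left out, but the event names are the ones the patch reports:

#include <stdbool.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Illustrative per-port gate that the forwarding loop must check before
 * each rte_eth_rx_burst()/rte_eth_tx_burst() call. */
static volatile bool dp_enabled[RTE_MAX_ETHPORTS];

static int
recovery_cb(uint16_t port_id, enum rte_eth_event_type event,
	    void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_ERR_RECOVERING) {
		/* Requirement 2: stop data path API invocation. A real
		 * application must also wait for in-flight bursts on
		 * other lcores to drain. */
		dp_enabled[port_id] = false;
	} else if (event == RTE_ETH_EVENT_RECOVERY_SUCCESS) {
		/* Requirement 4: the data path may be used again. */
		dp_enabled[port_id] = true;
	}
	return 0;
}

/* Registered once per port at init time, e.g.:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_ERR_RECOVERING,
 *		recovery_cb, NULL);
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_RECOVERY_SUCCESS,
 *		recovery_cb, NULL);
 */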
[PATCH 2/2] vhost: refactor to follow new naming convention
Simply replace '_slave_' with '_backend_'. Signed-off-by: Nobuhiro MIKI --- drivers/vdpa/ifc/ifcvf_vdpa.c | 2 +- lib/vhost/rte_vhost.h | 2 +- lib/vhost/version.map | 2 +- lib/vhost/vhost.c | 4 +-- lib/vhost/vhost.h | 4 +-- lib/vhost/vhost_user.c| 62 +-- lib/vhost/vhost_user.h| 6 ++-- 7 files changed, 41 insertions(+), 41 deletions(-) diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index fe4d278c5380..e4133568c1aa 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -605,7 +605,7 @@ virtio_interrupt_handler(struct ifcvf_internal *internal) int vid = internal->vid; int ret; - ret = rte_vhost_slave_config_change(vid, 1); + ret = rte_vhost_backend_config_change(vid, 1); if (ret) DRV_LOG(ERR, "failed to notify the guest about configuration space change."); } diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h index ed255fc6c51f..cc2ec99c315c 100644 --- a/lib/vhost/rte_vhost.h +++ b/lib/vhost/rte_vhost.h @@ -1066,7 +1066,7 @@ rte_vhost_get_vdpa_device(int vid); */ __rte_experimental int -rte_vhost_slave_config_change(int vid, bool need_reply); +rte_vhost_backend_config_change(int vid, bool need_reply); /** * Retrieve names of statistics of a Vhost virtqueue. diff --git a/lib/vhost/version.map b/lib/vhost/version.map index d779a5cf3a0a..d322a4a888cd 100644 --- a/lib/vhost/version.map +++ b/lib/vhost/version.map @@ -68,7 +68,7 @@ EXPERIMENTAL { global: rte_vhost_crypto_driver_start; - rte_vhost_slave_config_change; + rte_vhost_backend_config_change; rte_vhost_async_channel_register; rte_vhost_async_channel_unregister; rte_vhost_submit_enqueue_burst; diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index 358672bb176b..ef3794381751 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -701,9 +701,9 @@ vhost_new_device(void) dev->vid = i; dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET; - dev->slave_req_fd = -1; + dev->backend_req_fd = -1; dev->postcopy_ufd = -1; - rte_spinlock_init(&dev->slave_req_lock); + rte_spinlock_init(&dev->backend_req_lock); return i; } diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index a31d810531d7..8fdab13c7097 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -499,8 +499,8 @@ struct virtio_net { uint32_tmax_guest_pages; struct guest_page *guest_pages; - int slave_req_fd; - rte_spinlock_t slave_req_lock; + int backend_req_fd; + rte_spinlock_t backend_req_lock; int postcopy_ufd; int postcopy_listening; diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index cd4104ea5ce0..1ce1a5924f74 100644 --- a/lib/vhost/vhost_user.c +++ b/lib/vhost/vhost_user.c @@ -226,9 +226,9 @@ vhost_backend_cleanup(struct virtio_net *dev) dev->inflight_info = NULL; } - if (dev->slave_req_fd >= 0) { - close(dev->slave_req_fd); - dev->slave_req_fd = -1; + if (dev->backend_req_fd >= 0) { + close(dev->backend_req_fd); + dev->backend_req_fd = -1; } if (dev->postcopy_ufd >= 0) { @@ -2257,11 +2257,11 @@ vhost_user_set_protocol_features(struct virtio_net **pdev, { struct virtio_net *dev = *pdev; uint64_t protocol_features = ctx->msg.payload.u64; - uint64_t slave_protocol_features = 0; + uint64_t backend_protocol_features = 0; rte_vhost_driver_get_protocol_features(dev->ifname, - &slave_protocol_features); - if (protocol_features & ~slave_protocol_features) { + &backend_protocol_features); + if (protocol_features & ~backend_protocol_features) { VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid protocol features.\n"); return RTE_VHOST_MSG_RESULT_ERR; } @@ -2458,14 +2458,14 @@ vhost_user_set_req_fd(struct virtio_net 
**pdev, if (fd < 0) { VHOST_LOG_CONFIG(dev->ifname, ERR, - "invalid file descriptor for slave channel (%d)\n", fd); + "invalid file descriptor for backend channel (%d)\n", fd); return RTE_VHOST_MSG_RESULT_ERR; } - if (dev->slave_req_fd >= 0) - close(dev->slave_req_fd); + if (dev->backend_req_fd >= 0) + close(dev->backend_req_fd); - dev->slave_req_fd = fd; + dev->backend_req_fd = fd; return RTE_VHOST_MSG_RESULT_OK; } @@ -2931,46 +2931,46 @@ send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx } static int -send_vhost_slave_message(struc
[PATCH 0/2] vhost: fix names to follow new naming convention
In this series, we simply replace '_slave_' with '_backend_' so that the vhost-user protocol message and protocol names follow the new naming convention. Nobuhiro Miki (2): vhost: fix constants to follow new naming convention vhost: refactor to follow new naming convention drivers/vdpa/ifc/ifcvf_vdpa.c | 6 +-- drivers/vdpa/mlx5/mlx5_vdpa.c | 4 +- drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 +- lib/vhost/rte_vhost.h | 10 ++--- lib/vhost/version.map | 2 +- lib/vhost/vhost.c | 4 +- lib/vhost/vhost.h | 4 +- lib/vhost/vhost_user.c | 70 - lib/vhost/vhost_user.h | 20 +- 9 files changed, 62 insertions(+), 62 deletions(-) -- 2.31.1
[PATCH 1/2] vhost: fix constants to follow new naming convention
DPDK apps (e.g. dpdk-skeleton) output this name during negotiation. But it is not consistent when debugging using QEMU as a front-end, for example. This is because QEMU already follows the new naming convention [1]. Some type names and variable names, such as VhostUserSlaveRequest, are still in the old naming convention. But in this patch we only focus on constants. [1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html Signed-off-by: Nobuhiro MIKI --- drivers/vdpa/ifc/ifcvf_vdpa.c | 4 ++-- drivers/vdpa/mlx5/mlx5_vdpa.c | 4 ++-- drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 ++-- lib/vhost/rte_vhost.h | 8 lib/vhost/vhost_user.c | 14 +++--- lib/vhost/vhost_user.h | 14 +++--- 6 files changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index 4a87673682fc..fe4d278c5380 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -1315,8 +1315,8 @@ ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features) #define VDPA_SUPPORTED_PROTOCOL_FEATURES \ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ -1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \ -1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \ +1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | \ +1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | \ 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \ 1ULL << VHOST_USER_PROTOCOL_F_MQ | \ diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index f8dfa9513311..f1737f82a8ae 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -37,8 +37,8 @@ (1ULL << VIRTIO_NET_F_MTU)) #define MLX5_VDPA_PROTOCOL_FEATURES \ - ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ + ((1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \ diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c index 6401d4e16f25..e88c7eeaa609 100644 --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c @@ -21,8 +21,8 @@ /* These protocol features are needed to enable notifier ctrl */ #define SFC_VDPA_PROTOCOL_FEATURES \ ((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ (1ULL << VHOST_USER_PROTOCOL_F_MQ)) diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h index a395843fe96d..ed255fc6c51f 100644 --- a/lib/vhost/rte_vhost.h +++ b/lib/vhost/rte_vhost.h @@ -80,8 +80,8 @@ extern "C" { #define VHOST_USER_PROTOCOL_F_NET_MTU 4 #endif -#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ -#define VHOST_USER_PROTOCOL_F_SLAVE_REQ5 +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_REQ +#define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5 #endif #ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION @@ -96,8 +96,8 @@ extern "C" { #define VHOST_USER_PROTOCOL_F_CONFIG 9 #endif -#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD -#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD +#define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10 #endif #ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER diff --git a/lib/vhost/vhost_user.c 
b/lib/vhost/vhost_user.c index 9e361082dc9b..cd4104ea5ce0 100644 --- a/lib/vhost/vhost_user.c +++ b/lib/vhost/vhost_user.c @@ -2846,7 +2846,7 @@ VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \ -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_SLAVE_REQ_FD, vhost_user_set_req_fd, true) \ +VHOST_MESSAGE_HANDLER(VHOST_USER_SET_BACKEND_REQ_FD, vhost_user_set_req_fd, true) \ VHOST_MESSAGE_HANDLER(VHOST_USER_IOTLB_MSG, vhost_user_iotlb_msg, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_GET_CONFIG, vhost_user_get_config, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_SET_CONFIG, vhost_user_set_config, fal
[PATCH] net/cpfl: fix a compiler issue about virtchnl opcode
Comparing an enum virtchnl_ops variable with VIRTCHNL2_OP_EVENT will cause a compiler issue, as VIRTCHNL2_OP_EVENT is not included in enum virtchnl_ops. And the PMD uses virtual msg opcodes prefixed with virtchnl2 or VIRTCHNL2. Fixes: 20618563b81b ("net/cpfl: support device initialization") Signed-off-by: Mingxia Liu --- drivers/net/cpfl/cpfl_ethdev.c | 2 +- drivers/net/idpf/idpf_ethdev.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 0940bf1276..54261d5743 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1095,8 +1095,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; struct idpf_vport *vport; - enum virtchnl_ops vc_op; uint16_t pending = 1; + uint32_t vc_op; int ret; while (pending) { diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c index db58157ba3..59883af8a8 100644 --- a/drivers/net/idpf/idpf_ethdev.c +++ b/drivers/net/idpf/idpf_ethdev.c @@ -1058,8 +1058,8 @@ idpf_handle_virtchnl_msg(struct idpf_adapter_ext *adapter_ex) struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; struct idpf_vport *vport; - enum virtchnl_ops vc_op; uint16_t pending = 1; + enum virtchnl_ops vc_op; int ret; while (pending) { -- 2.34.1
[PATCH v2] net/cpfl: fix a compiler issue about virtchnl opcode
Comparing an enum virtchnl_ops variable with VIRTCHNL2_OP_EVENT will cause a compiler issue, as VIRTCHNL2_OP_EVENT is not included in enum virtchnl_ops. And the PMD uses virtual msg opcodes prefixed with virtchnl2 or VIRTCHNL2. Fixes: 20618563b81b ("net/cpfl: support device initialization") Signed-off-by: Mingxia Liu --- drivers/net/cpfl/cpfl_ethdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c index 0940bf1276..54261d5743 100644 --- a/drivers/net/cpfl/cpfl_ethdev.c +++ b/drivers/net/cpfl/cpfl_ethdev.c @@ -1095,8 +1095,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter) struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; struct idpf_vport *vport; - enum virtchnl_ops vc_op; uint16_t pending = 1; + uint32_t vc_op; int ret; while (pending) { -- 2.34.1
[PATCH v3] net/mlx5: use just sufficient barrier for ARM platforms
cqe->op_own indicates if the CQE is owned by the NIC. The rest of the fields in CQE should be read only after op_own is read. On Arm platforms using "dmb ishld" is sufficient to enforce this. Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error handling") Cc: ma...@mellanox.com Cc: sta...@dpdk.org Signed-off-by: Honnappa Nagarahalli Reviewed-by: Ruifeng Wang --- drivers/common/mlx5/mlx5_common.h | 6 +- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h index f8d07d6c6b..f4ddaf9f11 100644 --- a/drivers/common/mlx5/mlx5_common.h +++ b/drivers/common/mlx5/mlx5_common.h @@ -203,7 +203,11 @@ check_cqe(volatile struct mlx5_cqe *cqe, const uint16_t cqes_n, if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID))) return MLX5_CQE_STATUS_HW_OWN; - rte_io_rmb(); + /* Prevent speculative reading of other fields in CQE until +* CQE is valid. +*/ + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); + if (unlikely(op_code == MLX5_CQE_RESP_ERR || op_code == MLX5_CQE_REQ_ERR)) return MLX5_CQE_STATUS_ERR; -- 2.25.1
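As a generic illustration of the ordering the patch relies on - the field layout and ownership test below are hypothetical, not the real mlx5 CQE format:

#include <stdatomic.h>
#include <stdint.h>

struct cqe {
	uint32_t byte_cnt;	/* written by the NIC via DMA */
	uint8_t op_own;		/* ownership/opcode byte, written last */
};

/* Reads byte_cnt only once the NIC has released the CQE. The acquire
 * fence (compiled to "dmb ishld" on AArch64) keeps the byte_cnt load
 * from being speculated ahead of the op_own load; the previous
 * rte_io_rmb() ("dmb oshld") is stronger than needed for loads from
 * coherent host memory, which is the point of the patch. */
static int
cqe_read(volatile struct cqe *c, uint32_t *bc)
{
	uint8_t op_own = c->op_own;

	if (op_own & 0x1)	/* hypothetical HW-ownership bit */
		return 0;
	atomic_thread_fence(memory_order_acquire);
	*bc = c->byte_cnt;
	return 1;
}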
Re: [PATCH 0/2] vhost: fix names to follow new naming convention
On Thu, 9 Mar 2023 11:07:19 +0900 Nobuhiro MIKI wrote: > In this series, we simply replace '_slave_' with '_backend_' so that > the vhost-user protocol message and protocol names follow the new naming > convention. > > Nobuhiro Miki (2): > vhost: fix constants to follow new naming convention > vhost: refactor to follow new naming convention > > drivers/vdpa/ifc/ifcvf_vdpa.c | 6 +-- > drivers/vdpa/mlx5/mlx5_vdpa.c | 4 +- > drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 +- > lib/vhost/rte_vhost.h | 10 ++--- > lib/vhost/version.map | 2 +- > lib/vhost/vhost.c | 4 +- > lib/vhost/vhost.h | 4 +- > lib/vhost/vhost_user.c | 70 - > lib/vhost/vhost_user.h | 20 +- > 9 files changed, 62 insertions(+), 62 deletions(-) > Acked-by: Stephen Hemminger
Re: [PATCH] net/cpfl: fix a compiler issue about virtchnl opcode
On Thu, 9 Mar 2023 10:31:42 + Mingxia Liu wrote: > diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c > index db58157ba3..59883af8a8 100644 > --- a/drivers/net/idpf/idpf_ethdev.c > +++ b/drivers/net/idpf/idpf_ethdev.c > @@ -1058,8 +1058,8 @@ idpf_handle_virtchnl_msg(struct idpf_adapter_ext > *adapter_ex) > struct idpf_ctlq_msg ctlq_msg; > enum idpf_mbx_opc mbx_op; > struct idpf_vport *vport; > - enum virtchnl_ops vc_op; > uint16_t pending = 1; > + enum virtchnl_ops vc_op; > int ret; > > while (pending) { Why does the order of declarations matter here? Compilers are free to reorder this.
RE: [PATCH v2] net/mlx5: use just sufficient barrier for Arm platforms
> -----Original Message----- > From: Slava Ovsiienko > Sent: Tuesday, March 7, 2023 10:08 AM > To: Honnappa Nagarahalli ; dev@dpdk.org; > Ruifeng Wang ; Matan Azrad ; > Shahaf Shuler > Cc: nd ; Matan Azrad ; sta...@dpdk.org; > nd > Subject: RE: [PATCH v2] net/mlx5: use just sufficient barrier for Arm > platforms > > Hi, Honnappa > > I'm sorry for the delay - I had to play with the patch, to compile it with assembly > listings and check what code is actually generated on x86/ARM. > > On x86 there is no difference at all (with and w/o the patch), so no objection from > my side. > On ARM we have: > w/o patch: dmb oshld > with patch: dmb ishld > > What is the purpose of the barrier? To not allow CQE read access reordering. > On x86, a "compiler barrier" is quite enough (due to strong load/store ordering). > On ARM, loads/stores might be reordered, AFAIU. > > The CQE resides in host memory and can be directly written by the NIC via PCIe. > (In my understanding it can cause core cache(s) invalidations.) > I have a question - should we consider this as the outer shareable domain? > Is it safe to have a barrier for the inner domain only in our case? The inner domain is enough, hence the reason for this patch. > > We have updated the cqe_check() routine, sorry for this. Could you, please, > update the patch and send v3? Sent v3. > > With best regards, > Slava > > > -----Original Message----- > > From: Honnappa Nagarahalli > > Sent: Tuesday, November 15, 2022 03:46 > > To: Slava Ovsiienko ; dev@dpdk.org; Ruifeng > > Wang ; Matan Azrad ; > Shahaf > > Shuler > > Cc: nd ; Matan Azrad ; > sta...@dpdk.org; > > nd > > Subject: RE: [PATCH v2] net/mlx5: use just sufficient barrier for Arm > > platforms > > > > > > > > > > > > > > > > > Hi, Honnappa > > > Hi Slava, thanks for the feedback. > > > > > > > > > > > We discussed the barrier here: > > > > http://patches.dpdk.org/project/dpdk/patch/20210606164948.35997-1- > > > > honnappa.nagaraha...@arm.com/ > > > Yes, I have changed the patch according to the discussion, i.e. a > > > barrier is needed, but a different (inner shareable domain) barrier is > > > required. > > > > > > > > > > > (BTW, it is good practice to keep the reference to previous patch > > > > versions below the Commit Message of the next ones). > > > > > > > > This barrier is not about compiler ordering, it is about external > > > > HW agent memory action completions. > > > > So, I'm not sure the rte_atomic_thread_fence() is safe for x86 - the > > > > patch impacts > > > > x86 as well. > > > The earlier barrier 'rte_io_rmb()' resolves to a compiler barrier on x86 > > > [1]. > > > The rte_atomic_thread_fence(__ATOMIC_ACQUIRE) on x86 also acts as a > > > compiler barrier. So, there is no change for x86. > > > > > > > > > [1] > > > https://github.com/DPDK/dpdk/blob/main/lib/eal/x86/include/rte_atomic. > > > h# > > > L80 > > Hi Slava, any more comments on this? > > > > > > > > > > > > > With best regards, > > > > Slava > > > > > > > > > -----Original Message----- > > > > > From: Honnappa Nagarahalli > > > > > Sent: Tuesday, August 30, 2022 23:01 > > > > > To: dev@dpdk.org; honnappa.nagaraha...@arm.com; > > > > ruifeng.w...@arm.com; > > > > > Matan Azrad ; Shahaf Shuler > > > ; > > > > > Slava Ovsiienko > > > > > Cc: n...@arm.com; Matan Azrad ; sta...@dpdk.org > > > > > Subject: [PATCH v2] net/mlx5: use just sufficient barrier for > > > > > Arm platforms > > > > > > > > > > cqe->op_own indicates if the CQE is owned by the NIC. The rest > > > > > of > > > > > the fields in the CQE should be read only after op_own is read. 
On > > > > > Arm platforms using "dmb ishld" is sufficient to enforce this. > > > > > > > > > > Fixes: 88c0733535d6 ("net/mlx5: extend Rx completion with error > > > > > handling") > > > > > Cc: ma...@mellanox.com > > > > > Cc: sta...@dpdk.org > > > > > > > > > > Signed-off-by: Honnappa Nagarahalli > > > > > > > > > > Reviewed-by: Ruifeng Wang > > > > > --- > > > > > drivers/common/mlx5/mlx5_common.h | 6 +- > > > > > 1 file changed, 5 insertions(+), 1 deletion(-) > > > > > > > > > > diff --git a/drivers/common/mlx5/mlx5_common.h > > > > > b/drivers/common/mlx5/mlx5_common.h > > > > > index 5028a05b49..ac2e85b15f 100644 > > > > > --- a/drivers/common/mlx5/mlx5_common.h > > > > > +++ b/drivers/common/mlx5/mlx5_common.h > > > > > @@ -195,7 +195,11 @@ check_cqe(volatile struct mlx5_cqe *cqe, > > > > > const uint16_t cqes_n, > > > > > > > > > > if (unlikely((op_owner != (!!(idx))) || (op_code == > > > > > MLX5_CQE_INVALID))) > > > > > return MLX5_CQE_STATUS_HW_OWN; > > > > > - rte_io_rmb(); > > > > > + /* Prevent speculative reading of other fields in CQE until > > > > > + * CQE is valid. > > > > > + */ > > > > > + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); > > > > > + > > > > > if (unlikely(op_code == MLX5_CQE_RESP_ERR || > > > > >op_code == MLX5_CQE_REQ_ERR)) > > > > > return MLX5_CQE_STATUS_ERR; > > >
RE: [PATCH 1/5] ethdev: fix race-condition of proactive error handling mode
> -----Original Message----- > From: fengchengwen > Sent: Wednesday, March 8, 2023 7:00 PM > To: Honnappa Nagarahalli ; Konstantin > Ananyev ; dev@dpdk.org; > tho...@monjalon.net; Ferruh Yigit ; Andrew > Rybchenko ; Kalesh AP <anakkur.pura...@broadcom.com>; Ajit Khaparde > (ajit.khapa...@broadcom.com) > Cc: nd > Subject: Re: [PATCH 1/5] ethdev: fix race-condition of proactive error > handling > mode > > > > On 2023/3/8 9:09, Honnappa Nagarahalli wrote: > > > > > >>> > > > > Is there any reason not to design this in the same way as > 'rte_eth_dev_reset'? Why does the PMD have to recover by itself? > > I suppose it is a question for the authors of the original patch... > >>> Appreciate if the authors could comment on this. > >> > >> The main cause is the hardware implementation limit; I will try > >> to explain from the hns3 PMD's point of view. > >> For a global reset, all the functions need to respond within a certain > >> period of time, otherwise the reset will fail, and the reset > >> also requires a few steps (all of which may take a long time). > >> > >> With multiple functions in one DPDK process, when a global reset is triggered, > >> rte_eth_dev_reset will not cover this scene: > >> 1. each port will report RTE_ETH_EVENT_INTR_RESET in the interrupt thread. > >> 2. the application callback is then invoked, but because it runs in the same thread and each > >> port's recovery takes a long time, later ports will fail to reset. I am reading this again. What you are saying is that a single thread running the recovery process in sequence for multiple ports will not meet the required time limits. Hence, the recovery process needs to run in multiple threads simultaneously. This way each thread could run the recovery for a different port. Do I understand this correctly? (Assuming my understanding is correct) The current implementation is running the recovery process in the context of data plane threads and not in the interrupt thread. Is this correct? > > If the design were to introduce RTE_ETH_EVENT_INTR_RECOVER and > rte_eth_dev_recover, what problems do you see? > > I see no difference between the proposed 'RTE_ETH_EVENT_INTR_RECOVER and rte_eth_dev_recover' and the > RTE_ETH_EVENT_INTR_RESET mechanism. > Could you detail more? > > > > >> > >>> > > > We could have a similar API 'rte_eth_dev_recover' to do the > > recovery > functionality. > > I suppose such an approach is also possible. > Personally I am fine with both ways: either the existing one or what > you propose, as long as we fix the existing race-condition. > What is good with what you suggest is that this way we probably don't > need to worry about how to allow the user to enable/disable auto-recovery inside the > PMD. > > Konstantin > > >>>
RE: [PATCH] net/cpfl: fix a compiler issue about virtchnl opcode
> -----Original Message----- > From: Stephen Hemminger > Sent: Thursday, March 9, 2023 10:42 AM > To: Liu, Mingxia > Cc: dev@dpdk.org; Xing, Beilei ; Zhang, Yuying > > Subject: Re: [PATCH] net/cpfl: fix a compiler issue about virtchnl opcode > > On Thu, 9 Mar 2023 10:31:42 + > Mingxia Liu wrote: > > > diff --git a/drivers/net/idpf/idpf_ethdev.c > > b/drivers/net/idpf/idpf_ethdev.c index db58157ba3..59883af8a8 100644 > > --- a/drivers/net/idpf/idpf_ethdev.c > > +++ b/drivers/net/idpf/idpf_ethdev.c > > @@ -1058,8 +1058,8 @@ idpf_handle_virtchnl_msg(struct idpf_adapter_ext > *adapter_ex) > > struct idpf_ctlq_msg ctlq_msg; > > enum idpf_mbx_opc mbx_op; > > struct idpf_vport *vport; > > - enum virtchnl_ops vc_op; > > uint16_t pending = 1; > > + enum virtchnl_ops vc_op; > > int ret; > > > > while (pending) { > > > Why does the order of declarations matter here? > Compilers are free to reorder this. [Liu, Mingxia] Those were unintended changes; a v2 was sent without them.
RE: [PATCH 2/2] vhost: refactor to follow new naming convention
Hi Nobuhiro, Thanks for the work; please check the comments inline. > -----Original Message----- > From: Nobuhiro MIKI > Sent: Thursday, March 9, 2023 10:07 AM > To: maxime.coque...@redhat.com; Xia, Chenbo > Cc: dev@dpdk.org; Nobuhiro MIKI > Subject: [PATCH 2/2] vhost: refactor to follow new naming convention > > Simply replace '_slave_' with '_backend_'. > > Signed-off-by: Nobuhiro MIKI > --- > drivers/vdpa/ifc/ifcvf_vdpa.c | 2 +- > lib/vhost/rte_vhost.h | 2 +- > lib/vhost/version.map | 2 +- > lib/vhost/vhost.c | 4 +-- > lib/vhost/vhost.h | 4 +- > lib/vhost/vhost_user.c| 62 +-- > lib/vhost/vhost_user.h| 6 ++-- > 7 files changed, 41 insertions(+), 41 deletions(-) > > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c > index fe4d278c5380..e4133568c1aa 100644 > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c > @@ -605,7 +605,7 @@ virtio_interrupt_handler(struct ifcvf_internal > *internal) ... > > typedef struct VhostUserMemoryRegion { > uint64_t guest_phys_addr; > @@ -137,7 +137,7 @@ struct vhost_user_config { > typedef struct VhostUserMsg { > union { > uint32_t master; /* a VhostUserRequest value */ > - uint32_t slave; /* a VhostUserSlaveRequest value*/ > + uint32_t backend; /* a VhostUserBackendRequest value*/ I think we should rename the master to front-end too (as the QEMU spec says). If you can also replace all the 'master' occurrences with 'front-end' too, we would appreciate it very much. BTW, I guess this is your first patch; you need to update the .mailmap file with your name and email address. Thanks, Chenbo > } request. > > #define VHOST_USER_VERSION_MASK 0x3 > -- > 2.31.1
RE: [PATCH 1/2] vhost: fix constants to follow new naming convention
> -----Original Message----- > From: Nobuhiro MIKI > Sent: Thursday, March 9, 2023 10:07 AM > To: maxime.coque...@redhat.com; Xia, Chenbo > Cc: dev@dpdk.org; Nobuhiro MIKI > Subject: [PATCH 1/2] vhost: fix constants to follow new naming convention > > DPDK apps (e.g. dpdk-skeleton) output this name > during negotiation. But it is not consistent when > debugging using QEMU as a front-end, for example. > This is because QEMU already follows the new naming convention [1]. > > Some type names and variable names, such as VhostUserSlaveRequest, > are still in the old naming convention. But in this patch we > only focus on constants. > > [1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html > > Signed-off-by: Nobuhiro MIKI > --- > drivers/vdpa/ifc/ifcvf_vdpa.c | 4 ++-- > drivers/vdpa/mlx5/mlx5_vdpa.c | 4 ++-- > drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 ++-- > lib/vhost/rte_vhost.h | 8 > lib/vhost/vhost_user.c | 14 +++--- > lib/vhost/vhost_user.h | 14 +++--- > 6 files changed, 24 insertions(+), 24 deletions(-) > As I said in patch 2, it's better to update the .mailmap file in this patch 1. Thanks, Chenbo
[PATCH v1 1/2] test/bbdev: fix possible div by zero issue
Add extra check at function level to ensure num_ops cannot be 0. Coverity issue: 383647 Fixes: f0d288bca467 ("test/bbdev: add support for BLER for 4G") Cc: sta...@dpdk.org Signed-off-by: Hernan Vargas --- app/test-bbdev/test_bbdev_perf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c index e56d314e93b5..15ebcdfc1556 100644 --- a/app/test-bbdev/test_bbdev_perf.c +++ b/app/test-bbdev/test_bbdev_perf.c @@ -3722,6 +3722,7 @@ bler_pmd_lcore_ldpc_dec(void *arg) TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST); + TEST_ASSERT_SUCCESS((num_ops == 0), "NUM_OPS must be greater than 0"); rte_bbdev_info_get(tp->dev_id, &info); @@ -3856,6 +3857,7 @@ bler_pmd_lcore_turbo_dec(void *arg) TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST); + TEST_ASSERT_SUCCESS((num_ops == 0), "NUM_OPS must be greater than 0"); rte_bbdev_info_get(tp->dev_id, &info); -- 2.37.1
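For illustration, a minimal sketch of the failure mode the guard prevents; the macro below is a hypothetical stand-in for the test framework's TEST_ASSERT_SUCCESS (which fails the test when its argument is non-zero), not the actual bbdev test code:

#include <stdio.h>

/* Hypothetical stand-in: fail and return when the condition holds. */
#define TEST_ASSERT_SUCCESS(cond, msg) \
	do { \
		if (cond) { \
			fprintf(stderr, "%s\n", (msg)); \
			return -1; \
		} \
	} while (0)

static int
average_per_op(unsigned int total, unsigned int num_ops, double *avg)
{
	/* Guard first: with num_ops == 0 the division below would be a
	 * division by zero, which is what Coverity flagged. */
	TEST_ASSERT_SUCCESS(num_ops == 0, "NUM_OPS must be greater than 0");
	*avg = (double)total / num_ops;
	return 0;
}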
[PATCH v1 0/2] test-bbdev coverity fixes
Fixes for coverity issues 383647, 383155. Hernan Vargas (2): test/bbdev: fix possible div by zero issue test/bbdev: fix unchecked ret value issue app/test-bbdev/test_bbdev_perf.c | 9 +++-- 1 file changed, 7 insertions(+), 2 deletions(-) -- 2.37.1
[PATCH v1 2/2] test/bbdev: fix unchecked ret value issue
Add check for return value from get_bbdev_queue_stats. Coverity issue: 383155 Fixes: c25604355a15 ("app/bbdev: add explicit check for counters") Cc: sta...@dpdk.org Signed-off-by: Hernan Vargas --- app/test-bbdev/test_bbdev_perf.c | 7 +-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c index 15ebcdfc1556..276bbf0a2e6d 100644 --- a/app/test-bbdev/test_bbdev_perf.c +++ b/app/test-bbdev/test_bbdev_perf.c @@ -5679,7 +5679,7 @@ static int offload_cost_test(struct active_device *ad, struct test_op_params *op_params) { - int iter; + int iter, ret; uint16_t burst_sz = op_params->burst_sz; const uint16_t num_to_process = op_params->num_to_process; const enum rte_bbdev_op_type op_type = test_vector.op_type; @@ -5774,7 +5774,10 @@ offload_cost_test(struct active_device *ad, rte_get_tsc_hz()); struct rte_bbdev_stats stats = {0}; - get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); + ret = get_bbdev_queue_stats(ad->dev_id, queue_id, &stats); + TEST_ASSERT_SUCCESS(ret, + "Failed to get stats for queue (%u) of device (%u)", + queue_id, ad->dev_id); if (stats.enqueue_warn_count > 0) printf("Warning reported on the queue : %10"PRIu64"\n", stats.enqueue_warn_count); -- 2.37.1
[PATCH] net/idpf: fix a compiler issue about virtchnl opcode
Comparing an enum virtchnl_ops variable with VIRTCHNL2_OP_EVENT causes a compiler warning, as VIRTCHNL2_OP_EVENT is not included in enum virtchnl_ops; the PMD uses virtual message opcodes prefixed with virtchnl2 or VIRTCHNL2. Declare the variable as a plain uint32_t instead. Fixes: 78049b3dc7e6 ("net/idpf: add alarm to handle virtual channel message") Signed-off-by: Mingxia Liu --- drivers/net/idpf/idpf_ethdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c index db58157ba3..46aec6ae37 100644 --- a/drivers/net/idpf/idpf_ethdev.c +++ b/drivers/net/idpf/idpf_ethdev.c @@ -1058,8 +1058,8 @@ idpf_handle_virtchnl_msg(struct idpf_adapter_ext *adapter_ex) struct idpf_ctlq_msg ctlq_msg; enum idpf_mbx_opc mbx_op; struct idpf_vport *vport; - enum virtchnl_ops vc_op; uint16_t pending = 1; + uint32_t vc_op; int ret; while (pending) { -- 2.34.1
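For illustration, a minimal sketch of the diagnostic this avoids (the enum contents and the opcode value below are hypothetical): clang, for instance, warns on comparing an enum against a constant outside the enum's value range (-Wtautological-constant-out-of-range-compare), which becomes a build failure under -Werror, while the same comparison on a plain uint32_t is silent:

#include <stdint.h>

/* Hypothetical subset of the enum; the real one has many more opcodes. */
enum virtchnl_ops {
	VIRTCHNL_OP_VERSION = 1,
	VIRTCHNL_OP_GET_VF_RESOURCES = 3,
};
#define VIRTCHNL2_OP_EVENT 522 /* hypothetical value, outside the enum */

static int
is_event_enum(enum virtchnl_ops vc_op)
{
	/* clang: comparison of constant 522 with expression of type
	 * 'enum virtchnl_ops' is always false. */
	return vc_op == VIRTCHNL2_OP_EVENT;
}

static int
is_event_u32(uint32_t vc_op)
{
	/* Plain integer comparison, matching the fix above: no warning. */
	return vc_op == VIRTCHNL2_OP_EVENT;
}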
[PATCH 1/1] net/sfc: stop misuse of Rx ingress m-port metadata on EF100
The driver supports representor functionality. In it, packets coming from VFs to the dedicated back-end Rx queue get demultiplexed into front-end Rx queues of representor ethdevs as per the per-packet metadata indicating logical HW ingress ports. On transmit, packets are provided with symmetrical metadata by front-end Tx queues, and the back-end queue transforms the data into so-called Tx override descriptors. These let the packets bypass flow lookup and go directly to the represented VFs. However, in the Rx part, the driver extracts the said metadata on every HW Rx queue, that is, not just on the one used by representors. Doing so leads to buggy behaviour, revealed by operating testpmd as follows:

dpdk-testpmd -a :c6:00.0 -a :c6:00.1 -- -i
testpmd> flow create 0 transfer pattern port_representor \
port_id is 0 / end actions port_representor port_id 1 / end
Flow rule #0 created
testpmd> set fwd io
testpmd> start tx_first
testpmd> flow destroy 0 rule 0
Flow rule #0 destroyed
testpmd> stop

-- Forward statistics for port 0 -
RX-packets: 19196498 RX-dropped: 0 RX-total: 19196498
TX-packets: 19196535 TX-dropped: 0 TX-total: 19196535
---

-- Forward statistics for port 1 -
RX-packets: 19196503 RX-dropped: 0 RX-total: 19196503
TX-packets: 19196530 TX-dropped: 0 TX-total: 19196530
---

In this scenario, the two physical functions of the adapter do not have any corresponding "back-to-back" forwarder on the peer host. Packets transmitted from port 0 can only be forwarded to port 1 by means of a special flow rule. The flow rule indeed works, but destroying it does not stop forwarding: port statistics carry on incrementing. Also, it is apparent that forwarding in the opposite direction must not have worked in this case, as the flow is meant to target only one of the directions.

Because of the bug, the first 32 mbufs received as a result of the flow rule operation have the said metadata present. In io mode, testpmd does not tamper with mbufs and passes them directly to the transmit path, so this data remains in them, instructing the PMD to override destinations of the packets via Tx option descriptors.

Expected behaviour is as follows:

-- Forward statistics for port 0 -
RX-packets: 0 RX-dropped: 0 RX-total: 0
TX-packets: 15787496 TX-dropped: 0 TX-total: 15787496
---

-- Forward statistics for port 1 -
RX-packets: 15787464 RX-dropped: 0 RX-total: 15787464
TX-packets: 32 TX-dropped: 0 TX-total: 32
---

These figures show the rule works only for one direction. Also, removing the flow shall cause forwarding to cease. The provided patch fixes the bug accordingly. 
Fixes: d0f981a3efd8 ("net/sfc: handle ingress mport in EF100 Rx prefix") Cc: sta...@dpdk.org Signed-off-by: Ivan Malov Reviewed-by: Andy Moreton --- drivers/net/sfc/sfc_dp_rx.h| 3 +++ drivers/net/sfc/sfc_ef100_rx.c | 3 ++- drivers/net/sfc/sfc_rx.c | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h index 246adbd87c..51a44bd034 100644 --- a/drivers/net/sfc/sfc_dp_rx.h +++ b/drivers/net/sfc/sfc_dp_rx.h @@ -10,6 +10,8 @@ #ifndef _SFC_DP_RX_H #define _SFC_DP_RX_H +#include + #include #include @@ -27,6 +29,7 @@ extern "C" { */ struct sfc_dp_rxq { struct sfc_dp_queue dpq; + boolneed_ingress_mport; }; /** Datapath receive queue descriptor number limitations */ diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c index 16cd8524d3..c4d256b40d 100644 --- a/drivers/net/sfc/sfc_ef100_rx.c +++ b/drivers/net/sfc/sfc_ef100_rx.c @@ -876,7 +876,8 @@ sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr, else rxq->flags &= ~SFC_EF100_RXQ_USER_MARK; - if ((unsup_rx_prefix_fields & + if (dp_rxq->need_ingress_mport && + (unsup_rx_prefix_fields & (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) == 0) rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT; else diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c index 5ea98187c3..3d3d7d42e3 100644 --- a/drivers/net/sfc/sfc_rx.c +++ b/drivers/net/sfc/sfc_rx.c @@ -1265,6 +1265,9 @@ sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index, if (rc != 0) goto fail_dp_rx_qcreate; + rxq_info->dp->need_ingress_mport = +
[PATCH 1/2] common/sfc_efx/base: allow to request MAE mark reset action
Previously, DPDK sfc driver received support for tunnel offload. In it, MAE needs to set intermediate mark from an outer rule (OR) recirculation ID in order to help the driver identify packets that hit the OR but miss on action rule (AR) lookup. But, for packets that do hit an AR, the driver wants to reset this mark so that the end receiver of traffic does not see it. The driver has a call to request such mark reset, but it does not work as it comes via the regular mark populate API, which must not be invoked after final delivery action has been added. Provide a suitable dedicated API for that. Fixes: 3a73dcfdb255 ("common/sfc_efx/base: match on recirc ID in action rules") Cc: sta...@dpdk.org Signed-off-by: Ivan Malov Reviewed-by: Andy Moreton --- drivers/common/sfc_efx/base/efx.h | 18 ++ drivers/common/sfc_efx/base/efx_mae.c | 12 drivers/common/sfc_efx/version.map| 1 + 3 files changed, 31 insertions(+) diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h index 92ec18761b..f4fa88f169 100644 --- a/drivers/common/sfc_efx/base/efx.h +++ b/drivers/common/sfc_efx/base/efx.h @@ -4582,6 +4582,24 @@ efx_mae_action_set_populate_mark( __inefx_mae_actions_t *spec, __inuint32_t mark_value); +/* + * Whilst efx_mae_action_set_populate_mark() can be used to request setting + * a user mark in matching packets and demands that the request come before + * setting the final destination (deliver action), this API can be invoked + * after deliver action has been added in order to request mark reset if + * the user's own mark request has not been added as a result of parsing. + * + * It is useful when the driver chains an outer rule (OR) with an action + * rule (AR) by virtue of a recirculation ID. The OR may set mark from + * this ID to help the driver identify packets that hit the OR and do + * not hit the AR. But, for packets that do hit the AR, the driver + * wants to reset the mark value to avoid confusing recipients. + */ +LIBEFX_API +extern void +efx_mae_action_set_populate_mark_reset( + __inefx_mae_actions_t *spec); + LIBEFX_API extern __checkReturn efx_rc_t efx_mae_action_set_populate_deliver( diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c index 31f51b5548..7732d2 100644 --- a/drivers/common/sfc_efx/base/efx_mae.c +++ b/drivers/common/sfc_efx/base/efx_mae.c @@ -1916,6 +1916,18 @@ efx_mae_action_set_populate_mark( EFX_MAE_ACTION_MARK, sizeof (mark_value), arg)); } + void +efx_mae_action_set_populate_mark_reset( + __inefx_mae_actions_t *spec) +{ + uint32_t action_mask = (1U << EFX_MAE_ACTION_MARK); + + if ((spec->ema_actions & action_mask) == 0) { + spec->ema_actions |= action_mask; + spec->ema_mark_value = 0; + } +} + __checkReturn efx_rc_t efx_mae_action_set_populate_deliver( __inefx_mae_actions_t *spec, diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map index a54aab0a08..aabc354118 100644 --- a/drivers/common/sfc_efx/version.map +++ b/drivers/common/sfc_efx/version.map @@ -103,6 +103,7 @@ INTERNAL { efx_mae_action_set_populate_encap; efx_mae_action_set_populate_flag; efx_mae_action_set_populate_mark; + efx_mae_action_set_populate_mark_reset; efx_mae_action_set_populate_set_dst_mac; efx_mae_action_set_populate_set_src_mac; efx_mae_action_set_populate_vlan_pop; -- 2.17.1
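A sketch of the intended call order based on the header comment above (error handling trimmed; assumes the usual libefx declarations and that the mport selector was resolved earlier):

static	efx_rc_t
build_switch_actions(efx_mae_actions_t *spec, const efx_mport_sel_t *mportp)
{
	efx_rc_t rc;

	/* The final destination goes in first; per the rules of
	 * efx_mae_action_set_populate_mark(), a mark request would
	 * be rejected after this point. */
	rc = efx_mae_action_set_populate_deliver(spec, mportp);
	if (rc != 0)
		return (rc);

	/* Safe after deliver: requests mark value 0, and only when no
	 * MARK action was added while parsing the user's own actions. */
	efx_mae_action_set_populate_mark_reset(spec);

	return (0);
}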
[PATCH 2/2] net/sfc: fix resetting mark in tunnel offload switch rules
The currently used API is unfit for the purpose as it checks the order in which the action is being added. Use a dedicated API to request the reset. Fixes: 012bf708c20f ("net/sfc: support group flows in tunnel offload") Cc: sta...@dpdk.org Signed-off-by: Ivan Malov Reviewed-by: Andy Moreton --- drivers/net/sfc/sfc_mae.c | 8 +++- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c index 3daeed81b9..e5e9257998 100644 --- a/drivers/net/sfc/sfc_mae.c +++ b/drivers/net/sfc/sfc_mae.c @@ -3896,12 +3896,10 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa, break; case SFC_FT_RULE_SWITCH: /* -* Packets that go to the rule's AR have FT mark set (from the -* TUNNEL rule OR's RECIRC_ID). Remove this mark in matching -* packets. The user may have provided their own action -* MARK above, so don't check the return value here. +* Packets that go to the rule's AR have FT mark set (from +* the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero. */ - (void)efx_mae_action_set_populate_mark(ctx.spec, 0); + efx_mae_action_set_populate_mark_reset(ctx.spec); ctx.ft_switch_hit_counter = &spec_mae->ft_ctx->switch_hit_counter; -- 2.17.1
Re: [PATCH 2/2] vhost: refactor to follow new naming convention
On 2023/03/09 12:03, Xia, Chenbo wrote: > Hi Nobuhiro, > > Thanks for the work, check comments inline. > >> -Original Message- >> From: Nobuhiro MIKI >> Sent: Thursday, March 9, 2023 10:07 AM >> To: maxime.coque...@redhat.com; Xia, Chenbo >> Cc: dev@dpdk.org; Nobuhiro MIKI >> Subject: [PATCH 2/2] vhost: refactor to follow new naming convention >> >> Simply replace '_slave_' with '_backend_'. >> >> Signed-off-by: Nobuhiro MIKI >> --- >> drivers/vdpa/ifc/ifcvf_vdpa.c | 2 +- >> lib/vhost/rte_vhost.h | 2 +- >> lib/vhost/version.map | 2 +- >> lib/vhost/vhost.c | 4 +-- >> lib/vhost/vhost.h | 4 +-- >> lib/vhost/vhost_user.c| 62 +-- >> lib/vhost/vhost_user.h| 6 ++-- >> 7 files changed, 41 insertions(+), 41 deletions(-) >> >> diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c >> index fe4d278c5380..e4133568c1aa 100644 >> --- a/drivers/vdpa/ifc/ifcvf_vdpa.c >> +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c >> @@ -605,7 +605,7 @@ virtio_interrupt_handler(struct ifcvf_internal >> *internal) > > ... > >> >> typedef struct VhostUserMemoryRegion { >> uint64_t guest_phys_addr; >> @@ -137,7 +137,7 @@ struct vhost_user_config { >> typedef struct VhostUserMsg { >> union { >> uint32_t master; /* a VhostUserRequest value */ >> -uint32_t slave; /* a VhostUserSlaveRequest value*/ >> +uint32_t backend; /* a VhostUserBackendRequest value*/ > > I think we should rename the master to front-end too (as QEMU spec says) > > If you can also replace all the 'masters' with front-end too, we will > appreciate > that much.. > > BTW, I guess this is your first patch, you need to update .mailmap file with > your > name and email address. Hi Chenbo, Thanks for your review. I will fix them in v2. Best Regards, Nobuhiro MIKI
[PATCH 0/4] Small corrections in mempool
1) Patches 1/4, 2/4 - A few small corrections in the mempool API documentation. 2) Patch 3/4 - The API for checking that an lcore ID is valid is trivial, but it is the right thing to do. 3) Patch 4/4 - IMO, the 'lcore ID is valid' check does not need to happen at run time. If the ID is not valid, something is already seriously wrong in the system, or it is a programming error. Given that this is a data plane function, it makes sense to drop the check from the run time path. Honnappa Nagarahalli (4): mempool: clarify mempool cache flush API behavior mempool: clarify comments for mempool cache implementation eal: add API to check if lcore id is valid mempool: use lcore API to check if lcore ID is valid lib/eal/include/rte_lcore.h | 14 ++ lib/mempool/rte_mempool.c | 12 ++-- lib/mempool/rte_mempool.h | 13 - 3 files changed, 28 insertions(+), 11 deletions(-) -- 2.25.1
[PATCH 1/4] mempool: clarify mempool cache flush API behavior
Clarify that the mempool cache flush API works with the default mempool cache. It is the application's responsibility to validate that the cache belongs to the specified mempool. Signed-off-by: Honnappa Nagarahalli Reviewed-by: Kamalakshitha Aligeri Reviewed-by: Ruifeng Wang --- lib/mempool/rte_mempool.h | 8 ++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h index 9f530db24b..009bd10215 100644 --- a/lib/mempool/rte_mempool.h +++ b/lib/mempool/rte_mempool.h @@ -1326,10 +1326,14 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) } /** - * Flush a user-owned mempool cache to the specified mempool. + * Flush a mempool cache to the specified mempool. + * + * It is application's responsibility to validate that the mempool cache + * belongs to the specified mempool. * * @param cache - * A pointer to the mempool cache. + * A pointer to the mempool cache. If NULL, default mempool cache + * is used if configured. * @param mp * A pointer to the mempool. */ -- 2.25.1
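A short usage sketch of the user-owned cache flow the clarified comment describes (assuming an initialized EAL and a valid mempool mp):

#include <rte_lcore.h>
#include <rte_mempool.h>

static void
use_user_owned_cache(struct rte_mempool *mp)
{
	struct rte_mempool_cache *cache;
	void *objs[32];

	cache = rte_mempool_cache_create(32, rte_socket_id());
	if (cache == NULL)
		return;

	/* Gets and puts go through the user-owned cache rather than
	 * the per-lcore default cache. */
	if (rte_mempool_generic_get(mp, objs, 32, cache) == 0)
		rte_mempool_generic_put(mp, objs, 32, cache);

	/* Flush back to the same mempool the objects came from; the
	 * API does not verify this, it is the caller's responsibility. */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
}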
[PATCH 3/4] eal: add API to check if lcore id is valid
Simple API to check if the lcore ID does not exceed the maximum number of lcores configured. Signed-off-by: Honnappa Nagarahalli Reviewed-by: Ruifeng Wang --- lib/eal/include/rte_lcore.h | 14 ++ 1 file changed, 14 insertions(+) diff --git a/lib/eal/include/rte_lcore.h b/lib/eal/include/rte_lcore.h index 6a355e9986..cf99919a02 100644 --- a/lib/eal/include/rte_lcore.h +++ b/lib/eal/include/rte_lcore.h @@ -38,6 +38,20 @@ enum rte_lcore_role_t { ROLE_NON_EAL, }; +/** + * Check if the lcore ID is valid + * + * @param lcore_id + * The identifier of the lcore. + * + * @return + * True if the given lcore ID is between 0 and RTE_MAX_LCORE-1. + */ +static inline int rte_lcore_id_is_valid(unsigned int lcore_id) +{ + return (lcore_id < RTE_MAX_LCORE); +} + /** * Get a lcore's role. * -- 2.25.1
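A minimal usage sketch (the per-lcore table below is hypothetical) of the check the new helper provides, rejecting out-of-range IDs before indexing an RTE_MAX_LCORE-sized array:

#include <errno.h>
#include <stdint.h>
#include <rte_lcore.h>

static uint64_t per_lcore_hits[RTE_MAX_LCORE]; /* hypothetical table */

static int
get_lcore_hits(unsigned int lcore_id, uint64_t *out)
{
	if (!rte_lcore_id_is_valid(lcore_id))
		return -EINVAL;

	*out = per_lcore_hits[lcore_id];
	return 0;
}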
[PATCH 2/4] mempool: clarify comments for mempool cache implementation
Clarify that the mempool cache create and free API implementations work on user owned mempool caches. Signed-off-by: Honnappa Nagarahalli Reviewed-by: Kamalakshitha Aligeri Reviewed-by: Ruifeng Wang --- lib/mempool/rte_mempool.c | 12 ++-- 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/mempool/rte_mempool.c b/lib/mempool/rte_mempool.c index cf5dea2304..a46d855dbf 100644 --- a/lib/mempool/rte_mempool.c +++ b/lib/mempool/rte_mempool.c @@ -757,9 +757,9 @@ mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size) } /* - * Create and initialize a cache for objects that are retrieved from and - * returned to an underlying mempool. This structure is identical to the - * local_cache[lcore_id] pointed to by the mempool structure. + * Create and initialize a user owned cache for objects that are retrieved + * from and returned to an underlying mempool. This structure is identical + * to the local_cache[lcore_id] pointed to by the mempool structure. */ struct rte_mempool_cache * rte_mempool_cache_create(uint32_t size, int socket_id) @@ -786,9 +786,9 @@ rte_mempool_cache_create(uint32_t size, int socket_id) } /* - * Free a cache. It's the responsibility of the user to make sure that any - * remaining objects in the cache are flushed to the corresponding - * mempool. + * Free a user owned cache. It's the responsibility of the user to make + * sure that any remaining objects in the cache are flushed to the + * corresponding mempool. */ void rte_mempool_cache_free(struct rte_mempool_cache *cache) -- 2.25.1
[PATCH 4/4] mempool: use lcore API to check if lcore ID is valid
Use lcore API to check if the lcore ID is valid. The runtime check does not add much value. Hence use assert to validate the lcore ID. Signed-off-by: Honnappa Nagarahalli Reviewed-by: Wathsala Vithanage Reviewed-by: Ruifeng Wang --- lib/mempool/rte_mempool.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h index 009bd10215..00c5aa961b 100644 --- a/lib/mempool/rte_mempool.h +++ b/lib/mempool/rte_mempool.h @@ -1314,10 +1314,9 @@ rte_mempool_cache_free(struct rte_mempool_cache *cache); static __rte_always_inline struct rte_mempool_cache * rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) { - if (mp->cache_size == 0) - return NULL; + RTE_ASSERT(rte_lcore_id_is_valid(lcore_id)); - if (lcore_id >= RTE_MAX_LCORE) + if (mp->cache_size == 0) return NULL; rte_mempool_trace_default_cache(mp, lcore_id, -- 2.25.1
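For context, RTE_ASSERT expands to a real check only when the build defines RTE_ENABLE_ASSERT; in release builds it compiles away, which is what takes this validation off the data plane fast path. A sketch with a hypothetical helper:

#include <rte_debug.h>
#include <rte_lcore.h>

static inline void *
lcore_private_slot(void **slots, unsigned int lcore_id)
{
	/* Debug builds (RTE_ENABLE_ASSERT) panic on a bad ID;
	 * release builds pay no runtime cost for the check. */
	RTE_ASSERT(rte_lcore_id_is_valid(lcore_id));
	return slots[lcore_id];
}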
[PATCH v2 0/2] vhost: fix names to follow new naming convention
In this series, we simply replace 'master' with 'frontend' and 'slave' with 'backend' so that the vhost-user protocol message and protocol names follow the new naming convention. v2: - Update .mailmap in patch 1 - Replace 'master' with 'frontend' in patch 2 Nobuhiro MIKI (2): vhost: fix constants to follow new naming convention vhost: refactor to follow new naming convention .mailmap| 2 +- drivers/vdpa/ifc/ifcvf_vdpa.c | 6 +-- drivers/vdpa/mlx5/mlx5_vdpa.c | 4 +- drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 +- lib/vhost/rte_vhost.h | 16 +++--- lib/vhost/version.map | 2 +- lib/vhost/vhost.c | 4 +- lib/vhost/vhost.h | 4 +- lib/vhost/vhost_crypto.c| 2 +- lib/vhost/vhost_user.c | 90 - lib/vhost/vhost_user.h | 22 11 files changed, 78 insertions(+), 78 deletions(-) -- 2.31.1
[PATCH v2 1/2] vhost: fix constants to follow new naming convention
DPDK apps (e.g. dpdk-skeleton) output this name during negotiation. But, it is not consistent when debugging using QEMU as a front-end, for example. This is because QEMU already follows new naming convention [1]. Some type names and variable names, such as VhostUserSlaveRequest, are still in old naming convention. But, in this patch we only focus on constants. [1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html Signed-off-by: Nobuhiro MIKI Acked-by: Stephen Hemminger --- .mailmap| 2 +- drivers/vdpa/ifc/ifcvf_vdpa.c | 4 ++-- drivers/vdpa/mlx5/mlx5_vdpa.c | 4 ++-- drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 ++-- lib/vhost/rte_vhost.h | 8 lib/vhost/vhost_user.c | 14 +++--- lib/vhost/vhost_user.h | 14 +++--- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/.mailmap b/.mailmap index 8e7d78f37eef..3400241a6908 100644 --- a/.mailmap +++ b/.mailmap @@ -980,7 +980,7 @@ Nithin Dabilpuram Nitzan Weller Noa Ezra -Nobuhiro Miki +Nobuhiro MIKI Norbert Ciosek Odi Assli Ognjen Joldzic diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index 4a87673682fc..fe4d278c5380 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -1315,8 +1315,8 @@ ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, uint64_t *features) #define VDPA_SUPPORTED_PROTOCOL_FEATURES \ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ -1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \ -1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \ +1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | \ +1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | \ 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \ 1ULL << VHOST_USER_PROTOCOL_F_MQ | \ diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c index f8dfa9513311..f1737f82a8ae 100644 --- a/drivers/vdpa/mlx5/mlx5_vdpa.c +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c @@ -37,8 +37,8 @@ (1ULL << VIRTIO_NET_F_MTU)) #define MLX5_VDPA_PROTOCOL_FEATURES \ - ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ + ((1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ (1ULL << VHOST_USER_PROTOCOL_F_MQ) | \ diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c index 6401d4e16f25..e88c7eeaa609 100644 --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c @@ -21,8 +21,8 @@ /* These protocol features are needed to enable notifier ctrl */ #define SFC_VDPA_PROTOCOL_FEATURES \ ((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ -(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ +(1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ (1ULL << VHOST_USER_PROTOCOL_F_MQ)) diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h index a395843fe96d..ed255fc6c51f 100644 --- a/lib/vhost/rte_vhost.h +++ b/lib/vhost/rte_vhost.h @@ -80,8 +80,8 @@ extern "C" { #define VHOST_USER_PROTOCOL_F_NET_MTU 4 #endif -#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ -#define VHOST_USER_PROTOCOL_F_SLAVE_REQ5 +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_REQ +#define VHOST_USER_PROTOCOL_F_BACKEND_REQ 5 #endif #ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION @@ -96,8 +96,8 @@ extern "C" { #define VHOST_USER_PROTOCOL_F_CONFIG 9 #endif -#ifndef 
VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD -#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD +#define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10 #endif #ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index 9e361082dc9b..cd4104ea5ce0 100644 --- a/lib/vhost/vhost_user.c +++ b/lib/vhost/vhost_user.c @@ -2846,7 +2846,7 @@ VHOST_MESSAGE_HANDLER(VHOST_USER_GET_QUEUE_NUM, vhost_user_get_queue_num, false) VHOST_MESSAGE_HANDLER(VHOST_USER_SET_VRING_ENABLE, vhost_user_set_vring_enable, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_SEND_RARP, vhost_user_send_rarp, false) \ VHOST_MESSAGE_HANDLER(VHOST_USER_NET_SET_MTU, vhost_user_net_set_mtu, false) \ -VHOST_MESSAGE_HANDLER(VHOST_USER_SET_SLAVE_REQ_FD, vhost_user_set_req_fd, t
[PATCH v2 2/2] vhost: refactor to follow new naming convention
Simply replace 'master' with 'frontend' and 'slave' with 'backend'. Signed-off-by: Nobuhiro MIKI Acked-by: Stephen Hemminger --- drivers/vdpa/ifc/ifcvf_vdpa.c | 2 +- lib/vhost/rte_vhost.h | 8 ++-- lib/vhost/version.map | 2 +- lib/vhost/vhost.c | 4 +- lib/vhost/vhost.h | 4 +- lib/vhost/vhost_crypto.c | 2 +- lib/vhost/vhost_user.c| 82 +-- lib/vhost/vhost_user.h| 8 ++-- 8 files changed, 56 insertions(+), 56 deletions(-) diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c index fe4d278c5380..e4133568c1aa 100644 --- a/drivers/vdpa/ifc/ifcvf_vdpa.c +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c @@ -605,7 +605,7 @@ virtio_interrupt_handler(struct ifcvf_internal *internal) int vid = internal->vid; int ret; - ret = rte_vhost_slave_config_change(vid, 1); + ret = rte_vhost_backend_config_change(vid, 1); if (ret) DRV_LOG(ERR, "failed to notify the guest about configuration space change."); } diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h index ed255fc6c51f..58a5d4be92f8 100644 --- a/lib/vhost/rte_vhost.h +++ b/lib/vhost/rte_vhost.h @@ -264,9 +264,9 @@ typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg); * Optional vhost user message handlers. */ struct rte_vhost_user_extern_ops { - /* Called prior to the master message handling. */ + /* Called prior to the frontend message handling. */ rte_vhost_msg_handle pre_msg_handle; - /* Called after the master message handling. */ + /* Called after the frontend message handling. */ rte_vhost_msg_handle post_msg_handle; }; @@ -1060,13 +1060,13 @@ rte_vhost_get_vdpa_device(int vid); * @param vid * vhost device ID * @param need_reply - * wait for the master response the status of this operation + * wait for the frontend response the status of this operation * @return * 0 on success, < 0 on failure */ __rte_experimental int -rte_vhost_slave_config_change(int vid, bool need_reply); +rte_vhost_backend_config_change(int vid, bool need_reply); /** * Retrieve names of statistics of a Vhost virtqueue. 
diff --git a/lib/vhost/version.map b/lib/vhost/version.map index d779a5cf3a0a..d322a4a888cd 100644 --- a/lib/vhost/version.map +++ b/lib/vhost/version.map @@ -68,7 +68,7 @@ EXPERIMENTAL { global: rte_vhost_crypto_driver_start; - rte_vhost_slave_config_change; + rte_vhost_backend_config_change; rte_vhost_async_channel_register; rte_vhost_async_channel_unregister; rte_vhost_submit_enqueue_burst; diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index 358672bb176b..ef3794381751 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -701,9 +701,9 @@ vhost_new_device(void) dev->vid = i; dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET; - dev->slave_req_fd = -1; + dev->backend_req_fd = -1; dev->postcopy_ufd = -1; - rte_spinlock_init(&dev->slave_req_lock); + rte_spinlock_init(&dev->backend_req_lock); return i; } diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index a31d810531d7..8fdab13c7097 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -499,8 +499,8 @@ struct virtio_net { uint32_tmax_guest_pages; struct guest_page *guest_pages; - int slave_req_fd; - rte_spinlock_t slave_req_lock; + int backend_req_fd; + rte_spinlock_t backend_req_lock; int postcopy_ufd; int postcopy_listening; diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c index f02bf865c349..9bf5ef67b9ad 100644 --- a/lib/vhost/vhost_crypto.c +++ b/lib/vhost/vhost_crypto.c @@ -451,7 +451,7 @@ vhost_crypto_msg_post_handler(int vid, void *msg) return RTE_VHOST_MSG_RESULT_ERR; } - switch (ctx->msg.request.master) { + switch (ctx->msg.request.frontend) { case VHOST_USER_CRYPTO_CREATE_SESS: vhost_crypto_create_sess(vcrypto, &ctx->msg.payload.crypto_session); diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c index cd4104ea5ce0..92ed4f02a68c 100644 --- a/lib/vhost/vhost_user.c +++ b/lib/vhost/vhost_user.c @@ -7,11 +7,11 @@ * The vhost-user protocol connection is an external interface, so it must be * robust against invalid inputs. * - * This is important because the vhost-user master is only one step removed + * This is important because the vhost-user frontend is only one step removed * from the guest. Malicious guests that have escaped will then launch further - * attacks from the vhost-user master. + * attacks from the vhost-user frontend. * - * Even in deployments where guests are trusted, a bug in the vhost-user master + * Even i
RE: [PATCH v2 1/2] vhost: fix constants to follow new naming convention
> -Original Message- > From: Nobuhiro MIKI > Sent: Thursday, March 9, 2023 1:17 PM > To: maxime.coque...@redhat.com; Xia, Chenbo > Cc: dev@dpdk.org; step...@networkplumber.org; Nobuhiro MIKI corp.jp> > Subject: [PATCH v2 1/2] vhost: fix constants to follow new naming > convention > > DPDK apps (e.g. dpdk-skeleton) output this name > during negotiation. But, it is not consistent when > debugging using QEMU as a front-end, for example. > This is because QEMU already follows new naming convention [1]. > > Some type names and variable names, such as VhostUserSlaveRequest, > are still in old naming convention. But, in this patch we > only focus on constants. > > [1] https://qemu-project.gitlab.io/qemu/interop/vhost-user.html > > Signed-off-by: Nobuhiro MIKI > Acked-by: Stephen Hemminger > --- > .mailmap| 2 +- > drivers/vdpa/ifc/ifcvf_vdpa.c | 4 ++-- > drivers/vdpa/mlx5/mlx5_vdpa.c | 4 ++-- > drivers/vdpa/sfc/sfc_vdpa_ops.c | 4 ++-- > lib/vhost/rte_vhost.h | 8 > lib/vhost/vhost_user.c | 14 +++--- > lib/vhost/vhost_user.h | 14 +++--- > 7 files changed, 25 insertions(+), 25 deletions(-) > > diff --git a/.mailmap b/.mailmap > index 8e7d78f37eef..3400241a6908 100644 > --- a/.mailmap > +++ b/.mailmap > @@ -980,7 +980,7 @@ Nithin Dabilpuram > Nitin Saxena > Nitzan Weller > Noa Ezra > -Nobuhiro Miki > +Nobuhiro MIKI > Norbert Ciosek > Odi Assli > Ognjen Joldzic > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c > index 4a87673682fc..fe4d278c5380 100644 > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c > @@ -1315,8 +1315,8 @@ ifcvf_get_vdpa_features(struct rte_vdpa_device *vdev, > uint64_t *features) > > #define VDPA_SUPPORTED_PROTOCOL_FEATURES \ > (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \ > - 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \ > - 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \ > + 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ | \ > + 1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD | \ >1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \ >1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \ >1ULL << VHOST_USER_PROTOCOL_F_MQ | \ > diff --git a/drivers/vdpa/mlx5/mlx5_vdpa.c b/drivers/vdpa/mlx5/mlx5_vdpa.c > index f8dfa9513311..f1737f82a8ae 100644 > --- a/drivers/vdpa/mlx5/mlx5_vdpa.c > +++ b/drivers/vdpa/mlx5/mlx5_vdpa.c > @@ -37,8 +37,8 @@ > (1ULL << VIRTIO_NET_F_MTU)) > > #define MLX5_VDPA_PROTOCOL_FEATURES \ > - ((1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ > - (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ > + ((1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ > + (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ >(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ >(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ >(1ULL << VHOST_USER_PROTOCOL_F_MQ) | \ > diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c > b/drivers/vdpa/sfc/sfc_vdpa_ops.c > index 6401d4e16f25..e88c7eeaa609 100644 > --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c > +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c > @@ -21,8 +21,8 @@ > /* These protocol features are needed to enable notifier ctrl */ > #define SFC_VDPA_PROTOCOL_FEATURES \ > ((1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \ > - (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \ > - (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \ > + (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ) | \ > + (1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD) | \ >(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) | \ >(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) | \ >(1ULL << VHOST_USER_PROTOCOL_F_MQ)) > diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h > index 
a395843fe96d..ed255fc6c51f 100644 > --- a/lib/vhost/rte_vhost.h > +++ b/lib/vhost/rte_vhost.h > @@ -80,8 +80,8 @@ extern "C" { > #define VHOST_USER_PROTOCOL_F_NET_MTU4 > #endif > > -#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ > -#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5 > +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_REQ > +#define VHOST_USER_PROTOCOL_F_BACKEND_REQ5 > #endif > > #ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION > @@ -96,8 +96,8 @@ extern "C" { > #define VHOST_USER_PROTOCOL_F_CONFIG 9 > #endif > > -#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD > -#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10 > +#ifndef VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD > +#define VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD 10 > #endif > > #ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c > index 9e361082dc9b..cd4104ea5ce0 100644 > --- a/
RE: [PATCH v2 2/2] vhost: refactor to follow new naming convention
> -Original Message- > From: Nobuhiro MIKI > Sent: Thursday, March 9, 2023 1:17 PM > To: maxime.coque...@redhat.com; Xia, Chenbo > Cc: dev@dpdk.org; step...@networkplumber.org; Nobuhiro MIKI corp.jp> > Subject: [PATCH v2 2/2] vhost: refactor to follow new naming convention > > Simply replace 'master' with 'frontend' and 'slave' with 'backend'. > > Signed-off-by: Nobuhiro MIKI > Acked-by: Stephen Hemminger > --- > drivers/vdpa/ifc/ifcvf_vdpa.c | 2 +- > lib/vhost/rte_vhost.h | 8 ++-- > lib/vhost/version.map | 2 +- > lib/vhost/vhost.c | 4 +- > lib/vhost/vhost.h | 4 +- > lib/vhost/vhost_crypto.c | 2 +- > lib/vhost/vhost_user.c| 82 +-- > lib/vhost/vhost_user.h| 8 ++-- > 8 files changed, 56 insertions(+), 56 deletions(-) > > diff --git a/drivers/vdpa/ifc/ifcvf_vdpa.c b/drivers/vdpa/ifc/ifcvf_vdpa.c > index fe4d278c5380..e4133568c1aa 100644 > --- a/drivers/vdpa/ifc/ifcvf_vdpa.c > +++ b/drivers/vdpa/ifc/ifcvf_vdpa.c > @@ -605,7 +605,7 @@ virtio_interrupt_handler(struct ifcvf_internal > *internal) > int vid = internal->vid; > int ret; > > - ret = rte_vhost_slave_config_change(vid, 1); > + ret = rte_vhost_backend_config_change(vid, 1); > if (ret) > DRV_LOG(ERR, "failed to notify the guest about configuration > space change."); > } > diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h > index ed255fc6c51f..58a5d4be92f8 100644 > --- a/lib/vhost/rte_vhost.h > +++ b/lib/vhost/rte_vhost.h > @@ -264,9 +264,9 @@ typedef enum rte_vhost_msg_result > (*rte_vhost_msg_handle)(int vid, void *msg); > * Optional vhost user message handlers. > */ > struct rte_vhost_user_extern_ops { > - /* Called prior to the master message handling. */ > + /* Called prior to the frontend message handling. */ > rte_vhost_msg_handle pre_msg_handle; > - /* Called after the master message handling. */ > + /* Called after the frontend message handling. */ > rte_vhost_msg_handle post_msg_handle; > }; > > @@ -1060,13 +1060,13 @@ rte_vhost_get_vdpa_device(int vid); > * @param vid > * vhost device ID > * @param need_reply > - * wait for the master response the status of this operation > + * wait for the frontend response the status of this operation > * @return > * 0 on success, < 0 on failure > */ > __rte_experimental > int > -rte_vhost_slave_config_change(int vid, bool need_reply); > +rte_vhost_backend_config_change(int vid, bool need_reply); > > /** > * Retrieve names of statistics of a Vhost virtqueue. 
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map > index d779a5cf3a0a..d322a4a888cd 100644 > --- a/lib/vhost/version.map > +++ b/lib/vhost/version.map > @@ -68,7 +68,7 @@ EXPERIMENTAL { > global: > > rte_vhost_crypto_driver_start; > - rte_vhost_slave_config_change; > + rte_vhost_backend_config_change; > rte_vhost_async_channel_register; > rte_vhost_async_channel_unregister; > rte_vhost_submit_enqueue_burst; > diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c > index 358672bb176b..ef3794381751 100644 > --- a/lib/vhost/vhost.c > +++ b/lib/vhost/vhost.c > @@ -701,9 +701,9 @@ vhost_new_device(void) > > dev->vid = i; > dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET; > - dev->slave_req_fd = -1; > + dev->backend_req_fd = -1; > dev->postcopy_ufd = -1; > - rte_spinlock_init(&dev->slave_req_lock); > + rte_spinlock_init(&dev->backend_req_lock); > > return i; > } > diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h > index a31d810531d7..8fdab13c7097 100644 > --- a/lib/vhost/vhost.h > +++ b/lib/vhost/vhost.h > @@ -499,8 +499,8 @@ struct virtio_net { > uint32_tmax_guest_pages; > struct guest_page *guest_pages; > > - int slave_req_fd; > - rte_spinlock_t slave_req_lock; > + int backend_req_fd; > + rte_spinlock_t backend_req_lock; > > int postcopy_ufd; > int postcopy_listening; > diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c > index f02bf865c349..9bf5ef67b9ad 100644 > --- a/lib/vhost/vhost_crypto.c > +++ b/lib/vhost/vhost_crypto.c > @@ -451,7 +451,7 @@ vhost_crypto_msg_post_handler(int vid, void *msg) > return RTE_VHOST_MSG_RESULT_ERR; > } > > - switch (ctx->msg.request.master) { > + switch (ctx->msg.request.frontend) { > case VHOST_USER_CRYPTO_CREATE_SESS: > vhost_crypto_create_sess(vcrypto, > &ctx->msg.payload.crypto_session); > diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c > index cd4104ea5ce0..92ed4f02a68c 100644 > --- a/lib/vhost/vhost_user.c > +++ b/lib/vhost/vhost_user.c > @@ -7,11 +7,11 @@ > * The vhost-user protocol connection is an external interface, so it > must be > *
[dpdk-dev] [PATCH] doc: deprecation notice to remove LiquidIO ethdev driver
From: Jerin Jacob The LiquidIO product line (drivers/net/liquidio) has been substituted with CN9K/CN10K OCTEON product line smart NICs located in drivers/net/octeon_ep/. DPDK v20.08 categorized the LiquidIO driver as UNMAINTAINED because of the absence of updates in the driver. Due to the above reasons, the driver will be unavailable from DPDK 23.07. Signed-off-by: Jerin Jacob --- doc/guides/rel_notes/deprecation.rst | 6 ++ 1 file changed, 6 insertions(+) diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst index 872847e938..eb6c3aedd8 100644 --- a/doc/guides/rel_notes/deprecation.rst +++ b/doc/guides/rel_notes/deprecation.rst @@ -135,3 +135,9 @@ Deprecation Notices Its removal has been postponed to let potential users report interest in maintaining it. In the absence of such interest, this library will be removed in DPDK 23.11. + +* net/liquidio: remove LiquidIO ethdev driver. The LiquidIO product line has been substituted + with CN9K/CN10K OCTEON product line smart NICs located in ``drivers/net/octeon_ep/``. + DPDK v20.08 has categorized the LiquidIO driver as UNMAINTAINED because of the absence of + updates in the driver. Due to the above reasons, the driver will be unavailable from DPDK 23.07. + -- 2.39.2
Re: [PATCH] examples/l3fwd-power: support CPPC cpufreq
Hi Jie, On 2023/1/31 10:58, Jie Hai wrote: Currently the l3fwd-power only supports ACPI cpufreq and Pstate cpufreq. This patch adds CPPC cpufreq. Signed-off-by: Jie Hai Looks good, so Acked-by: Dongdong Liu Thanks, Dongdong --- examples/l3fwd-power/main.c | 9 ++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index fd3ade330f82..5090d5598172 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -2453,9 +2453,10 @@ init_power_library(void) /* we're not supporting the VM channel mode */ env = rte_power_get_env(); if (env != PM_ENV_ACPI_CPUFREQ && - env != PM_ENV_PSTATE_CPUFREQ) { + env != PM_ENV_PSTATE_CPUFREQ && + env != PM_ENV_CPPC_CPUFREQ) { RTE_LOG(ERR, POWER, - "Only ACPI and PSTATE mode are supported\n"); + "Only ACPI, PSTATE and CPPC mode are supported\n"); return -1; } } @@ -2639,12 +2640,14 @@ autodetect_mode(void) /* * Empty poll and telemetry modes have to be specifically requested to * be enabled, but we can auto-detect between interrupt mode with or -* without frequency scaling. Both ACPI and pstate can be used. +* without frequency scaling. Any of ACPI, pstate and CPPC can be used. */ if (rte_power_check_env_supported(PM_ENV_ACPI_CPUFREQ)) return APP_MODE_LEGACY; if (rte_power_check_env_supported(PM_ENV_PSTATE_CPUFREQ)) return APP_MODE_LEGACY; + if (rte_power_check_env_supported(PM_ENV_CPPC_CPUFREQ)) + return APP_MODE_LEGACY; RTE_LOG(NOTICE, L3FWD_POWER, "Frequency scaling not supported, selecting interrupt-only mode\n");
Re: [PATCH v2 1/2] build: clarify configuration without IOVA field in mbuf
09/03/2023 02:43, fengchengwen: > On 2023/3/7 0:13, Thomas Monjalon wrote: > > --- a/doc/guides/rel_notes/release_22_11.rst > > +++ b/doc/guides/rel_notes/release_22_11.rst > > @@ -504,7 +504,7 @@ ABI Changes > >``rte-worker-`` so that DPDK can accommodate lcores higher > > than 99. > > > > * mbuf: Replaced ``buf_iova`` field with ``next`` field and added a new > > field > > - ``dynfield2`` at its place in second cacheline if ``RTE_IOVA_AS_PA`` is > > 0. > > + ``dynfield2`` at its place in second cacheline if ``RTE_IOVA_IN_MBUF`` > > is 0. > > This should also be added to the release 23.03 rst. Yes, we could add a note in API changes. > The original 22.11 still has the RTE_IOVA_AS_PA definition. Yes, it was not a good idea to rename it in the release notes. > > -if dpdk_conf.get('RTE_IOVA_AS_PA') == 0 > > -build = false > > -reason = 'driver does not support disabling IOVA as PA mode' > > +if not get_option('enable_iova_as_pa') > > subdir_done() > > endif > > I suggest keeping the original form and replacing RTE_IOVA_AS_PA with RTE_IOVA_IN_MBUF: > if dpdk_conf.get('RTE_IOVA_IN_MBUF') == 0 > subdir_done() > endif Why test the C macro in Meson? It looks simpler to check the Meson option in Meson. > Meson 0.63.0 already supports deprecating an option in favour of a new option. > When updating to that Meson version, the drivers' meson.build files will not need to be modified. I don't understand this comment.
Re: ixgbe rxq interrupt not working
Hi Stephen, ixgbe interfaces work correctly when not used with dpdk.

# cat /proc/interrupts | grep enp2s0f1
109:        436          0          0          0   IR-PCI-MSI 1050624-edge   enp2s0f1-TxRx-0
110:          0        167          0          0   IR-PCI-MSI 1050625-edge   enp2s0f1-TxRx-1
111:          0          0        306          0   IR-PCI-MSI 1050626-edge   enp2s0f1-TxRx-2
112:          0          0          0        173   IR-PCI-MSI 1050627-edge   enp2s0f1-TxRx-3
113:          0          0          1          0   IR-PCI-MSI 1050628-edge   enp2s0f1

dmesg doesn't seem to offer any clue.

# dmesg | grep ixgbe
[7.680989] ixgbe: Intel(R) 10 Gigabit PCI Express Network Driver
[7.680996] ixgbe: Copyright (c) 1999-2016 Intel Corporation.
[8.318606] ixgbe :02:00.0: Multiqueue Enabled: Rx Queue count = 4, Tx Queue count = 4 XDP Queue count = 0
[8.477642] ixgbe :02:00.0: MAC: 6, PHY: 27, PBA No: 000700-000
[8.477654] ixgbe :02:00.0: 00:e0:ed:db:33:69
[8.539077] ixgbe :02:00.0: Intel(R) 10 Gigabit Network Connection
[8.888011] ixgbe :02:00.1: Multiqueue Enabled: Rx Queue count = 4, Tx Queue count = 4 XDP Queue count = 0
[9.019755] ixgbe :02:00.1: MAC: 6, PHY: 27, PBA No: 000700-000
[9.019760] ixgbe :02:00.1: 00:e0:ed:db:33:68
[9.063320] ixgbe :02:00.1: Intel(R) 10 Gigabit Network Connection

Thanks & Regards, Rajasekhar

On Wed, Mar 8, 2023 at 11:14 PM Stephen Hemminger <step...@networkplumber.org> wrote: > On Wed, 8 Mar 2023 22:54:12 +0530 > Rajasekhar Pulluru wrote: > > > No Honnappa. > > > > Thanks & Regards, > > Rajasekhar > > > > On Wed, Mar 8, 2023 at 5:49 AM Honnappa Nagarahalli < > > honnappa.nagaraha...@arm.com> wrote: > > > > > > > > > > > From: Rajasekhar Pulluru > > > Sent: Tuesday, March 7, 2023 12:52 PM > > > To: dev@dpdk.org > > > Subject: ixgbe rxq interrupt not working > > > > > > Hi Team, > > > > > > Bringing up dpdk-22.07 on an intel machine with 8 ports, 4 of them driven > > > by igb and the rest of the 4 ports driven by ixgbe. > > > [Honnappa] Do you have packets crossing between the 2 drivers? > > > > > > > > > I am following the below sequence to initialize these ports: > > > > > > dev_conf.intr_conf.lsc = 1; //Enable link state change interrupt > > > dev_conf.intr_conf.rxq = 1; //Enable RX Queue Interrupt > > > dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; > > > dev_conf.rxmode.offloads = 0; > > > dev_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE; > > > dev_conf.txmode.offloads = 0; > > > > > > rte_eth_dev_configure > > > rte_eth_rx_queue_setup > > > rte_eth_tx_queue_setup > > > rte_eth_dev_start > > > data = port_id << CHAR_BIT | queue_id; > > > rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD, > > > RTE_INTR_EVENT_ADD, (void *)((uintptr_t)data)); > > > rte_eth_dev_rx_intr_enable(port_id, queue_id); > > > > > > And then the main loop repeats the below: > > > > > > rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, timeout /* 200micro-sec > > > */); /* ignore return value */ > > > rte_eth_dev_rx_intr_disable(port_id, queue_id); > > > rte_eth_rx_burst(port_id, queue_id, pkts, num_pkts); > > > rte_eth_dev_rx_intr_enable(port_id, queue_id); > > > > > > The code is the same for all the ports; the igb ports are able to come up and rx > > > packets, whereas the ixgbe ports are not able to rx packets at all. > > > cat /proc/interrupts dumps vfio-msix counters for ixgbe as 0, whereas > > > they are non-zero for igb. > > > If I don't use/enable the rxq interrupt for ixgbe (and remove epoll wait, > > > interrupt enable/disable from the while loop) and simply poll > > > rte_eth_rx_burst in a loop, the ixgbe ports are able to rx packets. > > > > > > What could be wrong here? Appreciate any help. > > > > > > I would also like to know if there's an asynchronous rxq interrupt > > > notification to the application instead of rte_epoll_wait (and sleep). > > > > > > Thanks & Regards, > > > Rajasekhar > > Does the device work as expected when not used with DPDK? > I.e. does the kernel driver handle it correctly. > > Also check the kernel dmesg log for any relevant info. > There may be VFIO or other overlap involved. >
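For reference, a minimal sketch of the interrupt-driven receive loop being discussed, consolidating the calls from the quoted sequence (assumptions: the port and queue are already configured and started, and received packets are simply freed; note that rte_epoll_wait, like epoll_wait, takes its timeout in milliseconds):

#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

static void
rx_intr_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event[1];
	struct rte_mbuf *pkts[32];
	uint16_t nb;

	for (;;) {
		/* Sleep until the rxq interrupt fires or the timeout expires. */
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, event, 1, 200);
		rte_eth_dev_rx_intr_disable(port_id, queue_id);

		/* Drain the queue before re-arming, the way l3fwd-power
		 * does, so no packets sit unnoticed while waiting. */
		do {
			nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
			rte_pktmbuf_free_bulk(pkts, nb);
		} while (nb > 0);

		rte_eth_dev_rx_intr_enable(port_id, queue_id);
	}
}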
> > > > > > I would also like to know if there's an asynchronous rxq interrupt > > > notification to the application instead of rte_epoll_wait (and sleep). > > > > > > Thanks & Regards, > > > Rajasekhar > > > > > Does the device work as expected when not used with DPDK? > I.e does the kernel driver handle it correctly. > > > Also check the kernel dmesg log, for any relevant info. > There maybe VFIO or other overlap involved. >