Since the conversion of the ice driver to Page Pool, the ethtool loopback
test crashes:

 BUG: kernel NULL pointer dereference, address: 000000000000000c
 #PF: supervisor write access in kernel mode
 #PF: error_code(0x0002) - not-present page
 PGD 1100f1067 P4D 0
 Oops: Oops: 0002 [#1] SMP NOPTI
 CPU: 23 UID: 0 PID: 5904 Comm: ethtool Kdump: loaded Not tainted 6.19.0-0.rc7.260128g1f97d9dcf5364.49.eln154.x86_64 #1 PREEMPT(lazy)
 Hardware name: [...]
 RIP: 0010:ice_alloc_rx_bufs+0x1cd/0x310 [ice]
 Code: 83 6c 24 30 01 66 41 89 47 08 0f 84 c0 00 00 00 41 0f b7 dc 48 8b 44 24 18 48 c1 e3 04 41 bb 00 10 00 00 48 8d 2c 18 8b 04 24 <89> 45 0c 41 8b 4d 00 49 d3 e3 44 3b 5c 24 24 0f 83 ac fe ff ff 44
 RSP: 0018:ff7894738aa1f768 EFLAGS: 00010246
 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
 RDX: 0000000000000000 RSI: 0000000000000700 RDI: 0000000000000000
 RBP: 0000000000000000 R08: ff16dcae79880200 R09: 0000000000000019
 R10: 0000000000000001 R11: 0000000000001000 R12: 0000000000000000
 R13: 0000000000000000 R14: 0000000000000000 R15: ff16dcae6c670000
 FS:  00007fcf428850c0(0000) GS:ff16dcb149710000(0000) knlGS:0000000000000000
 CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
 CR2: 000000000000000c CR3: 0000000121227005 CR4: 0000000000773ef0
 PKRU: 55555554
 Call Trace:
  <TASK>
  ice_vsi_cfg_rxq+0xca/0x460 [ice]
  ice_vsi_cfg_rxqs+0x54/0x70 [ice]
  ice_loopback_test+0xa9/0x520 [ice]
  ice_self_test+0x1b9/0x280 [ice]
  ethtool_self_test+0xe5/0x200
  __dev_ethtool+0x1106/0x1a90
  dev_ethtool+0xbe/0x1a0
  dev_ioctl+0x258/0x4c0
  sock_do_ioctl+0xe3/0x130
  __x64_sys_ioctl+0xb9/0x100
  do_syscall_64+0x7c/0x700
  entry_SYSCALL_64_after_hwframe+0x76/0x7e
  [...]

It crashes because we have not initialized libeth for the rx ring.

Fix it by treating ICE_VSI_LB VSIs slightly more like normal PF VSIs and
letting them have a q_vector. It's just a dummy, because the loopback
test does not use interrupts, but it contains the napi struct that
libeth_rx_fq_create() needs when it is called from ice_vsi_cfg_rxq() ->
ice_rxq_pp_create().
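
For reference, a rough sketch of the libeth setup that the dummy q_vector
makes possible (illustrative only; the helper name and the libeth_fq fields
shown are simplified, only the libeth_rx_fq_create() call shape follows the
libeth Rx API):

/* Simplified sketch, not the exact ice code: the Page Pool for an Rx
 * ring is created through libeth, which needs the ring's napi (it reads
 * napi->dev when building the page_pool params). Without a q_vector,
 * an ICE_VSI_LB ring has no napi to hand over here.
 */
static int ice_rxq_pp_create_sketch(struct ice_rx_ring *ring)
{
	struct libeth_fq fq = {
		.count	= ring->count,		/* Rx descriptor count */
		.nid	= NUMA_NO_NODE,
	};

	return libeth_rx_fq_create(&fq, &ring->q_vector->napi);
}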

Fixes: 93f53db9f9dc ("ice: switch to Page Pool")
Signed-off-by: Michal Schmidt <[email protected]>
---
 drivers/net/ethernet/intel/ice/ice_base.c    |  5 ++++-
 drivers/net/ethernet/intel/ice/ice_ethtool.c |  4 ++++
 drivers/net/ethernet/intel/ice/ice_lib.c     | 15 ++++++++++-----
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index eadb1e3d12b3..f0da50df6791 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -124,6 +124,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
        if (vsi->type == ICE_VSI_VF) {
                ice_calc_vf_reg_idx(vsi->vf, q_vector);
                goto out;
+       } else if (vsi->type == ICE_VSI_LB) {
+               goto skip_alloc;
        } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
                struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
 
@@ -662,7 +664,8 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        u32 rx_buf_len;
        int err;
 
-       if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
+       if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
+           ring->vsi->type == ICE_VSI_LB) {
                if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
                        err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
                                                 ring->q_index,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3565a5d96c6d..e9f2618950c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1289,6 +1289,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
        test_vsi->netdev = netdev;
        tx_ring = test_vsi->tx_rings[0];
        rx_ring = test_vsi->rx_rings[0];
+       /* Dummy q_vector and napi. Fill the minimum required for
+        * ice_rxq_pp_create().
+        */
+       rx_ring->q_vector->napi.dev = netdev;
 
        if (ice_lbtest_prepare_rings(test_vsi)) {
                ret = 2;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d47af94f31a9..bad67e4dc044 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -107,10 +107,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->rxq_map)
                goto err_rxq_map;
 
-       /* There is no need to allocate q_vectors for a loopback VSI. */
-       if (vsi->type == ICE_VSI_LB)
-               return 0;
-
        /* allocate memory for q_vector pointers */
        vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
                                      sizeof(*vsi->q_vectors), GFP_KERNEL);
@@ -239,6 +235,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
        case ICE_VSI_LB:
                vsi->alloc_txq = 1;
                vsi->alloc_rxq = 1;
+               /* A dummy q_vector, no actual IRQ. */
+               vsi->num_q_vectors = 1;
                break;
        default:
                dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
@@ -2424,14 +2422,21 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                }
                break;
        case ICE_VSI_LB:
-               ret = ice_vsi_alloc_rings(vsi);
+               ret = ice_vsi_alloc_q_vectors(vsi);
                if (ret)
                        goto unroll_vsi_init;
 
+               ret = ice_vsi_alloc_rings(vsi);
+               if (ret)
+                       goto unroll_alloc_q_vector;
+
                ret = ice_vsi_alloc_ring_stats(vsi);
                if (ret)
                        goto unroll_vector_base;
 
+               /* Simply map the dummy q_vector to the only rx_ring */
+               vsi->rx_rings[0]->q_vector = vsi->q_vectors[0];
+
                break;
        default:
                /* clean up the resources and exit */
-- 
2.52.0
