As a consequence of refactoring the idpf code to use libeth APIs, idpf_vc_xn_shutdown was merged with and replaced by idpf_deinit_dflt_mbx. This does not affect the Tx path, as it checked for the presence of an xn manager anyway. Rx processing is handled by the mbx_task, which is not always cancelled before calling the new consolidated mailbox deinit function. Moreover, in the reset path idpf_intr_rel() reschedules it after the deinit is done. This leads to mbx_task referencing the freed mailbox, causing KASAN warnings.
To remedy this, in the init path, do the first queueing of mbx_task in idpf_init_dflt_mbx(), in deinit and reset, always cancel the task in idpf_deinit_dflt_mbx() and in every flow first call idpf_mb_intr_rel_irq(). Reviewed-by: Michal Kubiak <michal.kub...@intel.com> Signed-off-by: Larysa Zaremba <larysa.zare...@intel.com> --- drivers/net/ethernet/intel/idpf/idpf.h | 1 + drivers/net/ethernet/intel/idpf/idpf_lib.c | 9 ++++----- drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 6 +++++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 23cdb1b713ad..7fa6414a05ba 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -896,6 +896,7 @@ void idpf_vc_event_task(struct work_struct *work); void idpf_dev_ops_init(struct idpf_adapter *adapter); void idpf_vf_dev_ops_init(struct idpf_adapter *adapter); int idpf_intr_req(struct idpf_adapter *adapter); +void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter); void idpf_intr_rel(struct idpf_adapter *adapter); u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter); int idpf_initiate_soft_reset(struct idpf_vport *vport, diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 6d2369e8a344..d0f1d19e8786 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -66,9 +66,11 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) * This will also disable interrupt mode and queue up mailbox task. Mailbox * task will reschedule itself if not in interrupt mode. 
*/ -static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) +void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) { - clear_bit(IDPF_MB_INTR_MODE, adapter->flags); + if (!test_and_clear_bit(IDPF_MB_INTR_MODE, adapter->flags)) + return; + kfree(free_irq(adapter->msix_entries[0].vector, adapter)); queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); } @@ -1783,14 +1785,11 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) goto unlock_mutex; } - queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); - /* Initialize the state machine, also allocate memory and request * resources */ err = idpf_vc_core_init(adapter); if (err) { - cancel_delayed_work_sync(&adapter->mbx_task); idpf_deinit_dflt_mbx(adapter); goto unlock_mutex; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 9f175b7c2007..03cb42bffe60 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -2357,6 +2357,8 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter) adapter->xn_init_params.xnm = params.xnm; adapter->state = __IDPF_VER_CHECK; + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + return 0; } @@ -2366,6 +2368,9 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter) */ void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter) { + idpf_mb_intr_rel_irq(adapter); + cancel_delayed_work_sync(&adapter->mbx_task); + if (adapter->arq && adapter->asq) { idpf_mb_clean(adapter, adapter->asq); libie_ctlq_xn_deinit(adapter->xn_init_params.xnm, @@ -2605,7 +2610,6 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) idpf_deinit_dflt_mbx(adapter); cancel_delayed_work_sync(&adapter->serv_task); - cancel_delayed_work_sync(&adapter->mbx_task); idpf_vport_params_buf_rel(adapter); -- 2.47.0