On 11/5/25 4:51 PM, Dave Jiang wrote:
> The following lockdep splat was observed while kernel auto-online a CXL
> memory region:
>
> ======================================================
> WARNING: possible circular locking dependency detected
> 6.17.0djtest+ #53 Tainted: G W
> ------------------------------------------------------
> systemd-udevd/3334 is trying to acquire lock:
> ffffffff90346188 (hmem_resource_lock){+.+.}-{4:4}, at: hmem_register_resource+0x31/0x50
>
> but task is already holding lock:
> ffffffff90338890 ((node_chain).rwsem){++++}-{4:4}, at: blocking_notifier_call_chain+0x2e/0x70
>
> which lock already depends on the new lock.
> [..]
> Chain exists of:
> hmem_resource_lock --> mem_hotplug_lock --> (node_chain).rwsem
>
> Possible unsafe locking scenario:
>
>        CPU0                    CPU1
>        ----                    ----
>   rlock((node_chain).rwsem);
>                               lock(mem_hotplug_lock);
>                               lock((node_chain).rwsem);
>   lock(hmem_resource_lock);
>
> This lock ordering can lead to a deadlock: there are instances where
> hmem_resource_lock is taken after (node_chain).rwsem, and vice versa.
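
For anyone who wants to see the inversion in isolation: the report boils
down to an ABBA pattern between the notifier path and the recorded
hmem_resource_lock --> mem_hotplug_lock --> (node_chain).rwsem dependency.
Below is a minimal userspace sketch (not from the patch) that models only
the acquisition order with pthread mutexes standing in for the kernel
locks; the thread names and bodies are illustrative, not the real call
chains.

#include <pthread.h>

/* Stand-ins for the three kernel locks in the splat; the rwsem is modeled
 * as a plain mutex since only the ordering matters here. */
static pthread_mutex_t hmem_resource_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mem_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t node_chain_rwsem = PTHREAD_MUTEX_INITIALIZER;

/* "CPU1": the already-recorded dependency chain
 * hmem_resource_lock --> mem_hotplug_lock --> (node_chain).rwsem */
static void *existing_chain(void *arg)
{
	pthread_mutex_lock(&hmem_resource_lock);
	pthread_mutex_lock(&mem_hotplug_lock);
	pthread_mutex_lock(&node_chain_rwsem);
	pthread_mutex_unlock(&node_chain_rwsem);
	pthread_mutex_unlock(&mem_hotplug_lock);
	pthread_mutex_unlock(&hmem_resource_lock);
	return NULL;
}

/* "CPU0": hmat_callback() runs under the node_chain notifier
 * (blocking_notifier_call_chain()) and, by going through the full
 * hmat_register_target(), reaches hmem_register_resource(), which
 * takes hmem_resource_lock. */
static void *notifier_path(void *arg)
{
	pthread_mutex_lock(&node_chain_rwsem);
	pthread_mutex_lock(&hmem_resource_lock);
	pthread_mutex_unlock(&hmem_resource_lock);
	pthread_mutex_unlock(&node_chain_rwsem);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	/* With unlucky timing, "CPU0" blocks on hmem_resource_lock while
	 * "CPU1" blocks on node_chain_rwsem and neither makes progress. */
	pthread_create(&t0, NULL, notifier_path, NULL);
	pthread_create(&t1, NULL, existing_chain, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	return 0;
}

With the split below, the notifier path calls hmat_hotplug_target() and no
longer reaches hmem_register_resource(), so the (node_chain).rwsem -->
hmem_resource_lock edge goes away.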
>
> Split out the target update section of hmat_register_target() so that
> hmat_callback() only invokes that section instead of attempting to
> register hmem devices that it does not need to.
>
> Fixes: cf8741ac57ed ("ACPI: NUMA: HMAT: Register "soft reserved" memory as an "hmem" device")
> Signed-off-by: Dave Jiang <[email protected]>
Applied to cxl/fixes
71cd75e2b101a31d09f031e132a6ad04c911e164
> ---
> v4:
> - Fix fixes tag. (Jonathan)
> - Refactor hmat_hotplug_target(). (Jonathan)
> ---
> drivers/acpi/numa/hmat.c | 47 ++++++++++++++++++++++------------------
> 1 file changed, 26 insertions(+), 21 deletions(-)
>
> diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
> index 1dc73d20d989..d10cbe93c3a7 100644
> --- a/drivers/acpi/numa/hmat.c
> +++ b/drivers/acpi/numa/hmat.c
> @@ -874,10 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
>  	}
>  }
>
> -static void hmat_register_target(struct memory_target *target)
> +static void hmat_hotplug_target(struct memory_target *target)
>  {
>  	int nid = pxm_to_node(target->memory_pxm);
>
> +	/*
> +	 * Skip offline nodes. This can happen when memory
> +	 * marked EFI_MEMORY_SP, "specific purpose", is applied
> +	 * to all the memory in a proximity domain leading to
> +	 * the node being marked offline / unplugged, or if
> +	 * memory-only "hotplug" node is offline.
> +	 */
> +	if (nid == NUMA_NO_NODE || !node_online(nid))
> +		return;
> +
> +	guard(mutex)(&target_lock);
> +	if (target->registered)
> +		return;
> +
> +	hmat_register_target_initiators(target);
> +	hmat_register_target_cache(target);
> +	hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
> +	hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
> +	target->registered = true;
> +}
> +
> +static void hmat_register_target(struct memory_target *target)
> +{
>  	/*
>  	 * Devices may belong to either an offline or online
>  	 * node, so unconditionally add them.
> @@ -896,25 +919,7 @@ static void hmat_register_target(struct memory_target *target)
>  		}
>  	}
>
> -	/*
> -	 * Skip offline nodes. This can happen when memory
> -	 * marked EFI_MEMORY_SP, "specific purpose", is applied
> -	 * to all the memory in a proximity domain leading to
> -	 * the node being marked offline / unplugged, or if
> -	 * memory-only "hotplug" node is offline.
> -	 */
> -	if (nid == NUMA_NO_NODE || !node_online(nid))
> -		return;
> -
> -	mutex_lock(&target_lock);
> -	if (!target->registered) {
> -		hmat_register_target_initiators(target);
> -		hmat_register_target_cache(target);
> -		hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
> -		hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
> -		target->registered = true;
> -	}
> -	mutex_unlock(&target_lock);
> +	hmat_hotplug_target(target);
>  }
> 
>  static void hmat_register_targets(void)
> @@ -940,7 +945,7 @@ static int hmat_callback(struct notifier_block *self,
>  	if (!target)
>  		return NOTIFY_OK;
> 
> -	hmat_register_target(target);
> +	hmat_hotplug_target(target);
>  	return NOTIFY_OK;
>  }
>