Re: [PATCH 3/3] spapr_numa.c: fix ibm,max-associativity-domains calculation

2021-01-28 Thread Greg Kurz
On Thu, 28 Jan 2021 12:17:31 -0300
Daniel Henrique Barboza  wrote:

> The current logic calculates 'maxdomain' as the sum of
> numa_state->num_nodes and spapr->gpu_numa_id. spapr->gpu_numa_id is
> used as an index to determine the next available NUMA id that a
> given NVGPU can use.
> 
> The problem is that the initial value of gpu_numa_id, for any topology
> that has more than one NUMA node, is equal to numa_state->num_nodes.
> This means that our maxdomain will always be at least twice the
> number of existing NUMA nodes, so a guest with 4 NUMA nodes ends up
> with the following max-associativity-domains:
> 
> rtas/ibm,max-associativity-domains
>  00000004 00000008 00000008 00000008 00000008
> 
> This overtuned maxdomain doesn't go unnoticed in the guest and is
> detected by SLUB during boot:
> 
>  dmesg | grep SLUB
> [    0.000000] SLUB: HWalign=128, Order=0-3, MinObjects=0, CPUs=4, Nodes=8
> 
> SLUB is detecting 8 total nodes, with 4 nodes being online.
> 
> This patch fixes ibm,max-associativity-domains by using the number of
> NVGPU NUMA nodes present in the guest instead of spapr->gpu_numa_id.
> 
> Reported-by: Cédric Le Goater 
> Signed-off-by: Daniel Henrique Barboza 
> ---
>  hw/ppc/spapr_numa.c | 16 +++-
>  1 file changed, 15 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
> index f71105c783..f4d6abce87 100644
> --- a/hw/ppc/spapr_numa.c
> +++ b/hw/ppc/spapr_numa.c
> @@ -60,6 +60,19 @@ unsigned int spapr_numa_initial_nvgpu_NUMA_id(MachineState *machine)
>      return MAX(1, machine->numa_state->num_nodes);
>  }
>  
> +/*
> + * Note: if called before spapr_phb_pci_collect_nvgpu() finishes collecting
> + * all NVGPUs, this function will not give the right number of NVGPU NUMA
> + * nodes.
> + */

This helper has exactly one user: spapr_numa_write_rtas_dt(). Maybe just
open-code it there, with a comment that spapr->gpu_numa_id is assumed to
be correct at the time we populate the device tree?
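
For illustration, an open-coded version could look roughly like the sketch
below (just a sketch of the idea under that assumption, not an actual
patch; the local variable name 'nvgpus_nodes' is made up):

    /*
     * spapr->gpu_numa_id is assumed to be final here, i.e. all NVGPUs
     * have already been collected by spapr_phb_pci_collect_nvgpu() by
     * the time the device tree is populated.
     */
    uint32_t nvgpus_nodes = spapr->gpu_numa_id -
                            spapr_numa_initial_nvgpu_NUMA_id(ms);
    uint32_t maxdomain = ms->numa_state->num_nodes + nvgpus_nodes;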

> +static
> +unsigned int spapr_numa_get_number_nvgpus_nodes(SpaprMachineState *spapr)
> +{
> +    MachineState *ms = MACHINE(spapr);
> +
> +    return spapr->gpu_numa_id - spapr_numa_initial_nvgpu_NUMA_id(ms);
> +}
> +
>  /*
>   * This function will translate the user distances into
>   * what the kernel understand as possible values: 10
> @@ -311,6 +324,7 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
>  {
>      MachineState *ms = MACHINE(spapr);
>      SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
> +    uint32_t number_nvgpus_nodes = spapr_numa_get_number_nvgpus_nodes(spapr);
>      uint32_t refpoints[] = {
>          cpu_to_be32(0x4),
>          cpu_to_be32(0x3),
> @@ -318,7 +332,7 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
>          cpu_to_be32(0x1),
>      };
>      uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
> -    uint32_t maxdomain = ms->numa_state->num_nodes + spapr->gpu_numa_id;
> +    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
>      uint32_t maxdomains[] = {
>          cpu_to_be32(4),
>          cpu_to_be32(maxdomain),




[PATCH 3/3] spapr_numa.c: fix ibm,max-associativity-domains calculation

2021-01-28 Thread Daniel Henrique Barboza
The current logic calculates 'maxdomain' as the sum of
numa_state->num_nodes and spapr->gpu_numa_id. spapr->gpu_numa_id is
used as an index to determine the next available NUMA id that a
given NVGPU can use.

The problem is that the initial value of gpu_numa_id, for any topology
that has more than one NUMA node, is equal to numa_state->num_nodes.
This means that our maxdomain will always be at least twice the
number of existing NUMA nodes, so a guest with 4 NUMA nodes ends up
with the following max-associativity-domains:

rtas/ibm,max-associativity-domains
 00000004 00000008 00000008 00000008 00000008

This overtuned maxdomain doesn't go unnoticed in the guest and is
detected by SLUB during boot:

 dmesg | grep SLUB
[    0.000000] SLUB: HWalign=128, Order=0-3, MinObjects=0, CPUs=4, Nodes=8

SLUB is detecting 8 total nodes, with 4 nodes being online.

This patch fixes ibm,max-associativity-domains by using the number of
NVGPU NUMA nodes present in the guest instead of spapr->gpu_numa_id.
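
To spell out the arithmetic (a worked example, assuming a 4-node guest
with no NVGPUs attached):

    gpu_numa_id (initial)  = MAX(1, num_nodes)            = 4
    maxdomain (before fix) = num_nodes + gpu_numa_id      = 4 + 4 = 8
    maxdomain (after fix)  = num_nodes + NVGPU NUMA nodes = 4 + 0 = 4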

Reported-by: Cédric Le Goater 
Signed-off-by: Daniel Henrique Barboza 
---
 hw/ppc/spapr_numa.c | 16 +++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index f71105c783..f4d6abce87 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -60,6 +60,19 @@ unsigned int spapr_numa_initial_nvgpu_NUMA_id(MachineState *machine)
     return MAX(1, machine->numa_state->num_nodes);
 }
 
+/*
+ * Note: if called before spapr_phb_pci_collect_nvgpu() finishes collecting
+ * all NVGPUs, this function will not give the right number of NVGPU NUMA
+ * nodes.
+ */
+static
+unsigned int spapr_numa_get_number_nvgpus_nodes(SpaprMachineState *spapr)
+{
+    MachineState *ms = MACHINE(spapr);
+
+    return spapr->gpu_numa_id - spapr_numa_initial_nvgpu_NUMA_id(ms);
+}
+
 /*
  * This function will translate the user distances into
  * what the kernel understand as possible values: 10
@@ -311,6 +324,7 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
 {
     MachineState *ms = MACHINE(spapr);
     SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr);
+    uint32_t number_nvgpus_nodes = spapr_numa_get_number_nvgpus_nodes(spapr);
     uint32_t refpoints[] = {
         cpu_to_be32(0x4),
         cpu_to_be32(0x3),
@@ -318,7 +332,7 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas)
         cpu_to_be32(0x1),
     };
     uint32_t nr_refpoints = ARRAY_SIZE(refpoints);
-    uint32_t maxdomain = ms->numa_state->num_nodes + spapr->gpu_numa_id;
+    uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes;
     uint32_t maxdomains[] = {
         cpu_to_be32(4),
         cpu_to_be32(maxdomain),
-- 
2.26.2