On Tue, 25 Apr 2017 19:06:34 +0200
Andrew Jones <drjo...@redhat.com> wrote:

> On Wed, Mar 22, 2017 at 02:32:39PM +0100, Igor Mammedov wrote:
> > Signed-off-by: Igor Mammedov <imamm...@redhat.com>
> > ---
> >  hw/arm/virt-acpi-build.c | 19 +++++++------------
> >  hw/arm/virt.c            | 13 +++++++------
> >  2 files changed, 14 insertions(+), 18 deletions(-)
> > 
> > diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
> > index 0835e59..ce7499c 100644
> > --- a/hw/arm/virt-acpi-build.c
> > +++ b/hw/arm/virt-acpi-build.c
> > @@ -486,30 +486,25 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
> >      AcpiSystemResourceAffinityTable *srat;
> >      AcpiSratProcessorGiccAffinity *core;
> >      AcpiSratMemoryAffinity *numamem;
> > -    int i, j, srat_start;
> > +    int i, srat_start;
> >      uint64_t mem_base;
> > -    uint32_t *cpu_node = g_malloc0(vms->smp_cpus * sizeof(uint32_t));
> > -
> > -    for (i = 0; i < vms->smp_cpus; i++) {
> > -        j = numa_get_node_for_cpu(i);
> > -        if (j < nb_numa_nodes) {
> > -                cpu_node[i] = j;
> > -        }
> > -    }
> > +    MachineClass *mc = MACHINE_GET_CLASS(vms);
> > +    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));
> >  
> >      srat_start = table_data->len;
> >      srat = acpi_data_push(table_data, sizeof(*srat));
> >      srat->reserved1 = cpu_to_le32(1);
> >  
> > -    for (i = 0; i < vms->smp_cpus; ++i) {
> > +    for (i = 0; i < cpu_list->len; ++i) {
> > +        int node_id = cpu_list->cpus[i].props.has_node_id ?
> > +            cpu_list->cpus[i].props.node_id : 0;
> >          core = acpi_data_push(table_data, sizeof(*core));
> >          core->type = ACPI_SRAT_PROCESSOR_GICC;
> >          core->length = sizeof(*core);
> > -        core->proximity = cpu_to_le32(cpu_node[i]);
> > +        core->proximity = cpu_to_le32(node_id);
> >          core->acpi_processor_uid = cpu_to_le32(i);
> >          core->flags = cpu_to_le32(1);
> >      }
> > -    g_free(cpu_node);
> >  
> >      mem_base = vms->memmap[VIRT_MEM].base;
> >      for (i = 0; i < nb_numa_nodes; ++i) {
> > diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> > index 68d44f3..0a75df5 100644
> > --- a/hw/arm/virt.c
> > +++ b/hw/arm/virt.c
> > @@ -338,7 +338,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
> >  {
> >      int cpu;
> >      int addr_cells = 1;
> > -    unsigned int i;
> > +    const MachineState *ms = MACHINE(vms);
> >  
> >      /*
> >       * From Documentation/devicetree/bindings/arm/cpus.txt
> > @@ -369,6 +369,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
> >      for (cpu = vms->smp_cpus - 1; cpu >= 0; cpu--) {
> >          char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> >          ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
> > +        CPUState *cs = CPU(armcpu);
> >  
> >          qemu_fdt_add_subnode(vms->fdt, nodename);
> >          qemu_fdt_setprop_string(vms->fdt, nodename, "device_type", "cpu");
> > @@ -389,9 +390,9 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
> >                                    armcpu->mp_affinity);
> >          }
> >  
> > -        i = numa_get_node_for_cpu(cpu);
> > -        if (i < nb_numa_nodes) {
> > -            qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id", i);
> > +        if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
> > +            qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id",
> > +                ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
> >          }
> >  
> >          g_free(nodename);
> > @@ -1378,8 +1379,8 @@ static void machvirt_init(MachineState *machine)
> >          cs = CPU(cpuobj);
> >          cs->cpu_index = n;
> >  
> > -        node_id = numa_get_node_for_cpu(cs->cpu_index);
> > -        if (node_id == nb_numa_nodes) {
> > +        node_id = machine->possible_cpus->cpus[cs->cpu_index].props.node_id;
> > +        if (!machine->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
> >              /* by default CPUState::numa_node was 0 if it's not set via CLI
> >               * keep it this way for now but in future we probably should
> >               * refuse to start up with incomplete numa mapping */
> > -- 
> > 2.7.4
> > 
> >  
> 
> We now have many machine->possible_cpus->cpus[index].props.[has_]node_id
> instances. I think we need inline accessors added to include/sysemu/numa.h
> like
> 
>  static inline bool numa_has_node_id(MachineState *ms, int index)
>  {
>    return ms->possible_cpus->cpus[index].props.has_node_id;
>  }
> 
>  static inline int numa_node_id(MachineState *ms, int index)
>  {
>    return ms->possible_cpus->cpus[index].props.node_id;
>  }
> 
>  ...
> 
> to improve readability and maintainability.
I dislike this kind of one-line wrapper: for me it hurts readability
and maintainability, because I'm forced to jump around the code every
time I see such a wrapper in order to recall what it does and how.
The code still fits on one line, so I'd like to keep it wrapper-less
in this case, if you don't insist on the change.
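
For example, the virt.c hunk above would read roughly as follows with
the accessors you suggest (just a sketch for comparison, ignoring the
const qualifier on ms):

  /* open-coded, as in this patch */
  if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) {
      qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id",
          ms->possible_cpus->cpus[cs->cpu_index].props.node_id);
  }

  /* with the proposed numa_has_node_id()/numa_node_id() helpers */
  if (numa_has_node_id(ms, cs->cpu_index)) {
      qemu_fdt_setprop_cell(vms->fdt, nodename, "numa-node-id",
                            numa_node_id(ms, cs->cpu_index));
  }

Both variants stay within the line limit, which is why I don't think
the helpers buy us much here.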

> 
> Or, instead, we could provide macros to allow assignments, e.g.
> 
>  #define NUMA_HAS_NODE_ID(ms, index) \
>    ((ms)->possible_cpus->cpus[index].props.has_node_id)
>  #define NUMA_NODE_ID(ms, index) \
>    ((ms)->possible_cpus->cpus[index].props.node_id)
Ditto, plus worse debuggability.
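With the static inline one can at least still do something like

  (gdb) break numa_node_id
  (gdb) print numa_node_id(ms, 0)

(assuming the helper isn't completely inlined away at the optimization
level used for debugging), whereas the macro leaves nothing to break
on or call, only the expanded expression at each use site.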

> 
> Thanks,
> drew
> 

