[PATCH] i386/cpu: Don't emulate L3 cache on 8000_001D if l3-cache is disabled

2023-05-31 Thread Yanan Wang via
Currently, we only avoid emulating L3 cache properties for AMD CPUs
when l3-cache is off, but fail to consider this case on CPUID 8000_001D.
This results in the fact that we will still have L3 caches in the VM
even though we pass "host-cache-info=off,l3-cache=off" on the QEMU CLI.

Fixes: 8f4202fb108 ("i386: Populate AMD Processor Cache Information for cpuid 
0x801D")
Signed-off-by: Yanan Wang 
---
 target/i386/cpu.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 1242bd541a..17c367c5ba 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -6337,8 +6337,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, 
uint32_t count,
_info, eax, ebx, ecx, edx);
 break;
 case 3: /* L3 cache info */
-encode_cache_cpuid801d(env->cache_info_amd.l3_cache,
-   _info, eax, ebx, ecx, edx);
+if (cpu->enable_l3_cache) {
+encode_cache_cpuid801d(env->cache_info_amd.l3_cache,
+   _info, eax, ebx, ecx, edx);
+}
 break;
 default: /* end of info */
 *eax = *ebx = *ecx = *edx = 0;
-- 
2.33.0




[PATCH] softmmu/device_tree: Remove redundant pointer assignment

2022-01-10 Thread Yanan Wang via
The pointer assignment "const char *p = path;" in function
qemu_fdt_add_path is unnecessary. Let's remove it and just
use the "path" passed in. No functional change.

Suggested-by: Richard Henderson 
Signed-off-by: Yanan Wang 
---
Based on: softmmu/device_tree: Silence compiler warning with --enable-sanitizers
https://patchew.org/QEMU/20220107133844.145039-1-th...@redhat.com/
---
 softmmu/device_tree.c | 9 -
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c
index 9e96f5ecd5..8897c79ea4 100644
--- a/softmmu/device_tree.c
+++ b/softmmu/device_tree.c
@@ -556,7 +556,6 @@ int qemu_fdt_add_subnode(void *fdt, const char *name)
 int qemu_fdt_add_path(void *fdt, const char *path)
 {
 const char *name;
-const char *p = path;
 int namelen, retval;
 int parent = 0;
 
@@ -565,9 +564,9 @@ int qemu_fdt_add_path(void *fdt, const char *path)
 }
 
 do {
-name = p + 1;
-p = strchr(name, '/');
-namelen = p != NULL ? p - name : strlen(name);
+name = path + 1;
+path = strchr(name, '/');
+namelen = path != NULL ? path - name : strlen(name);
 
 retval = fdt_subnode_offset_namelen(fdt, parent, name, namelen);
 if (retval < 0 && retval != -FDT_ERR_NOTFOUND) {
@@ -584,7 +583,7 @@ int qemu_fdt_add_path(void *fdt, const char *path)
 }
 
 parent = retval;
-} while (p);
+} while (path);
 
 return retval;
 }
-- 
2.27.0




[PATCH v7 3/6] hw/acpi/aml-build: Improve scalability of PPTT generation

2022-01-07 Thread Yanan Wang via
Use g_queue APIs to reduce the nested loops and code indentation
with the processor hierarchy levels increasing. Consequently,
it's more scalable to add a new topology level to build_pptt.

No functional change intended.

Signed-off-by: Yanan Wang 
Reviewed-by: Andrew Jones 
---
 hw/acpi/aml-build.c | 50 +
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b3b3310df3..6aaedca2e5 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,7 +2001,10 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
 const char *oem_id, const char *oem_table_id)
 {
-int pptt_start = table_data->len;
+GQueue *list = g_queue_new();
+guint pptt_start = table_data->len;
+guint parent_offset;
+guint length, i;
 int uid = 0;
 int socket;
 AcpiTable table = { .sig = "PPTT", .rev = 2,
@@ -2010,9 +2013,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
 acpi_table_begin(, table_data);
 
 for (socket = 0; socket < ms->smp.sockets; socket++) {
-uint32_t socket_offset = table_data->len - pptt_start;
-int core;
-
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 /*
@@ -2021,35 +2023,47 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  */
 (1 << 0),
 0, socket, NULL, 0);
+}
 
-for (core = 0; core < ms->smp.cores; core++) {
-uint32_t core_offset = table_data->len - pptt_start;
-int thread;
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int core;
 
+parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (core = 0; core < ms->smp.cores; core++) {
 if (ms->smp.threads > 1) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 (0 << 0), /* not a physical package */
-socket_offset, core, NULL, 0);
-
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-core_offset, uid++, NULL, 0);
-}
+parent_offset, core, NULL, 0);
 } else {
 build_processor_hierarchy_node(
 table_data,
 (1 << 1) | /* ACPI Processor ID valid */
 (1 << 3),  /* Node is a Leaf */
-socket_offset, uid++, NULL, 0);
+parent_offset, uid++, NULL, 0);
 }
 }
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int thread;
+
+parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (thread = 0; thread < ms->smp.threads; thread++) {
+build_processor_hierarchy_node(
+table_data,
+(1 << 1) | /* ACPI Processor ID valid */
+(1 << 2) | /* Processor is a Thread */
+(1 << 3),  /* Node is a Leaf */
+parent_offset, uid++, NULL, 0);
+}
+}
+
+g_queue_free(list);
 acpi_table_end(linker, );
 }
 
-- 
2.27.0




[PATCH v7 4/6] tests/acpi/bios-tables-test: Allow changes to virt/PPTT file

2022-01-07 Thread Yanan Wang via
List test/data/acpi/virt/PPTT as the expected files allowed to
be changed in tests/qtest/bios-tables-test-allowed-diff.h

Signed-off-by: Yanan Wang 
Acked-by: Ani Sinha 
---
 tests/qtest/bios-tables-test-allowed-diff.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/qtest/bios-tables-test-allowed-diff.h 
b/tests/qtest/bios-tables-test-allowed-diff.h
index dfb8523c8b..cb143a55a6 100644
--- a/tests/qtest/bios-tables-test-allowed-diff.h
+++ b/tests/qtest/bios-tables-test-allowed-diff.h
@@ -1 +1,2 @@
 /* List of comma-separated changed AML files to ignore */
+"tests/data/acpi/virt/PPTT",
-- 
2.27.0




[PATCH v7 6/6] tests/acpi/bios-table-test: Update expected virt/PPTT file

2022-01-07 Thread Yanan Wang via
Run ./tests/data/acpi/rebuild-expected-aml.sh from build directory
to update PPTT binary. Also empty bios-tables-test-allowed-diff.h.

The disassembled differences between actual and expected PPTT:

 /*
  * Intel ACPI Component Architecture
  * AML/ASL+ Disassembler version 20200528 (64-bit version)
  * Copyright (c) 2000 - 2020 Intel Corporation
  *
- * Disassembly of tests/data/acpi/virt/PPTT, Tue Jan  4 12:51:11 2022
+ * Disassembly of /tmp/aml-2ZGOF1, Tue Jan  4 12:51:11 2022
  *
  * ACPI Data Table [PPTT]
  *
  * Format: [HexOffset DecimalOffset ByteLength]  FieldName : FieldValue
  */

 [000h    4]Signature : "PPTT"[Processor Properties 
Topology Table]
-[004h 0004   4] Table Length : 004C
+[004h 0004   4] Table Length : 0060
 [008h 0008   1] Revision : 02
-[009h 0009   1] Checksum : A8
+[009h 0009   1] Checksum : 48
 [00Ah 0010   6]   Oem ID : "BOCHS "
 [010h 0016   8] Oem Table ID : "BXPC"
 [018h 0024   4] Oem Revision : 0001
 [01Ch 0028   4]  Asl Compiler ID : "BXPC"
 [020h 0032   4]Asl Compiler Revision : 0001

 [024h 0036   1]Subtable Type : 00 [Processor Hierarchy Node]
 [025h 0037   1]   Length : 14
 [026h 0038   2] Reserved : 
 [028h 0040   4]Flags (decoded below) : 0001
 Physical package : 1
  ACPI Processor ID valid : 0
Processor is a thread : 0
   Node is a leaf : 0
 Identical Implementation : 0
 [02Ch 0044   4]   Parent : 
 [030h 0048   4]ACPI Processor ID : 
 [034h 0052   4]  Private Resource Number : 

 [038h 0056   1]Subtable Type : 00 [Processor Hierarchy Node]
 [039h 0057   1]   Length : 14
 [03Ah 0058   2] Reserved : 
-[03Ch 0060   4]Flags (decoded below) : 000A
+[03Ch 0060   4]Flags (decoded below) : 
 Physical package : 0
- ACPI Processor ID valid : 1
+ ACPI Processor ID valid : 0
Processor is a thread : 0
-  Node is a leaf : 1
+  Node is a leaf : 0
 Identical Implementation : 0
 [040h 0064   4]   Parent : 0024
 [044h 0068   4]ACPI Processor ID : 
 [048h 0072   4]  Private Resource Number : 

-Raw Table Data: Length 76 (0x4C)
+[04Ch 0076   1]Subtable Type : 00 [Processor Hierarchy Node]
+[04Dh 0077   1]   Length : 14
+[04Eh 0078   2] Reserved : 
+[050h 0080   4]Flags (decoded below) : 000A
+Physical package : 0
+ ACPI Processor ID valid : 1
+   Processor is a thread : 0
+  Node is a leaf : 1
+Identical Implementation : 0
+[054h 0084   4]   Parent : 0038
+[058h 0088   4]ACPI Processor ID : 
+[05Ch 0092   4]  Private Resource Number : 
+
+Raw Table Data: Length 96 (0x60)

-: 50 50 54 54 4C 00 00 00 02 A8 42 4F 43 48 53 20  // PPTTL.BOCHS
+: 50 50 54 54 60 00 00 00 02 48 42 4F 43 48 53 20  // PPTT`HBOCHS
 0010: 42 58 50 43 20 20 20 20 01 00 00 00 42 58 50 43  // BXPCBXPC
 0020: 01 00 00 00 00 14 00 00 01 00 00 00 00 00 00 00  // 
-0030: 00 00 00 00 00 00 00 00 00 14 00 00 0A 00 00 00  // 
-0040: 24 00 00 00 00 00 00 00 00 00 00 00  // $...
+0030: 00 00 00 00 00 00 00 00 00 14 00 00 00 00 00 00  // 
+0040: 24 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00  // $...
+0050: 0A 00 00 00 38 00 00 00 00 00 00 00 00 00 00 00  // 8...

Signed-off-by: Yanan Wang 
Reviewed-by: Ani Sinha 
---
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 tests/qtest/bios-tables-test-allowed-diff.h |   1 -
 2 files changed, 1 deletion(-)

diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT
index 
7a1258ecf123555b24462c98ccbb76b4ac1d0c2b..f56ea63b369a604877374ad696c396e796ab1c83
 100644
GIT binary patch
delta 53
zcmV-50LuSNU

[PATCH v7 1/6] hw/arm/virt: Support CPU cluster on ARM virt machine

2022-01-07 Thread Yanan Wang via
ARM64 machines like Kunpeng Family Server Chips have a level
of hardware topology in which a group of CPU cores share L3
cache tag or L2 cache. For example, Kunpeng 920 typically
has 6 or 8 clusters in each NUMA node (which also represents the
range of a CPU die), and each cluster has 4 CPU cores. All clusters
share L3 cache data, but CPU cores in each cluster share a
local L3 tag.

When running a guest kernel with Cluster-Aware Scheduling on
hosts which have physical clusters, if we design a vCPU
topology with a cluster level for the guest kernel and then
apply dedicated vCPU pinning, the guest will gain scheduling
performance improvement from the cache affinity of CPU clusters.

So let's enable the support for this new parameter on ARM
virt machines. After this patch, we can define a 4-level
CPU hierarchy like: cpus=*,maxcpus=*,sockets=*,clusters=*,
cores=*,threads=*.

Signed-off-by: Yanan Wang 
Reviewed-by: Andrew Jones 
---
 hw/arm/virt.c   |  1 +
 qemu-options.hx | 10 ++
 2 files changed, 11 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 6bce595aba..f413e146d9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2700,6 +2700,7 @@ static void virt_machine_class_init(ObjectClass *oc, void 
*data)
 hc->unplug_request = virt_machine_device_unplug_request_cb;
 hc->unplug = virt_machine_device_unplug_cb;
 mc->nvdimm_supported = true;
+mc->smp_props.clusters_supported = true;
 mc->auto_enable_numa_with_memhp = true;
 mc->auto_enable_numa_with_memdev = true;
 mc->default_ram_id = "mach-virt.ram";
diff --git a/qemu-options.hx b/qemu-options.hx
index fd1f8135fb..69ef1cdb85 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -277,6 +277,16 @@ SRST
 
 -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16
 
+The following sub-option defines a CPU topology hierarchy (2 sockets
+totally on the machine, 2 clusters per socket, 2 cores per cluster,
+2 threads per core) for ARM virt machines which support sockets/clusters
+/cores/threads. Some members of the option can be omitted but their values
+will be automatically computed:
+
+::
+
+-smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16
+
 Historically preference was given to the coarsest topology parameters
 when computing missing values (ie sockets preferred over cores, which
 were preferred over threads), however, this behaviour is considered
-- 
2.27.0




[PATCH v7 5/6] hw/acpi/aml-build: Support cluster level in PPTT generation

2022-01-07 Thread Yanan Wang via
Support CPU cluster topology level in generation of ACPI
Processor Properties Topology Table (PPTT).

Signed-off-by: Yanan Wang 
Reviewed-by: Andrew Jones 
---
 hw/acpi/aml-build.c | 18 ++
 1 file changed, 18 insertions(+)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 6aaedca2e5..bb2cad63b5 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,6 +2001,7 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
 const char *oem_id, const char *oem_table_id)
 {
+MachineClass *mc = MACHINE_GET_CLASS(ms);
 GQueue *list = g_queue_new();
 guint pptt_start = table_data->len;
 guint parent_offset;
@@ -2025,6 +2026,23 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
 0, socket, NULL, 0);
 }
 
+if (mc->smp_props.clusters_supported) {
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int cluster;
+
+parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
+build_processor_hierarchy_node(
+table_data,
+(0 << 0), /* not a physical package */
+parent_offset, cluster, NULL, 0);
+}
+}
+}
+
 length = g_queue_get_length(list);
 for (i = 0; i < length; i++) {
 int core;
-- 
2.27.0




[PATCH v7 2/6] hw/arm/virt: Support cluster level in DT cpu-map

2022-01-07 Thread Yanan Wang via
Support one cluster level between core and physical package in the
cpu-map of Arm/virt devicetree. This is also consistent with Linux
Doc "Documentation/devicetree/bindings/cpu/cpu-topology.txt".

Signed-off-by: Yanan Wang 
Reviewed-by: Andrew Jones 
---
 hw/arm/virt.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index f413e146d9..fc5eea8c8c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -430,9 +430,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
  * can contain several layers of clustering within a single physical
  * package and cluster nodes can be contained in parent cluster nodes.
  *
- * Given that cluster is not yet supported in the vCPU topology,
- * we currently generate one cluster node within each socket node
- * by default.
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
  */
 qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
 
@@ -442,14 +441,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 
 if (ms->smp.threads > 1) {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
-cpu / (ms->smp.cores * ms->smp.threads),
+"/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+(cpu / (ms->smp.cores * ms->smp.threads)) % 
ms->smp.clusters,
 (cpu / ms->smp.threads) % ms->smp.cores,
 cpu % ms->smp.threads);
 } else {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d",
-cpu / ms->smp.cores,
+"/cpus/cpu-map/socket%d/cluster%d/core%d",
+cpu / (ms->smp.clusters * ms->smp.cores),
+(cpu / ms->smp.cores) % ms->smp.clusters,
 cpu % ms->smp.cores);
 }
 qemu_fdt_add_path(ms->fdt, map_path);
-- 
2.27.0




[PATCH v7 0/6] ARM virt: Support CPU cluster topology

2022-01-07 Thread Yanan Wang via
This v7 series enables the support for CPU cluster topology on
ARM virt machines. The generic infrastructure for the CPU cluster
parameter is already upstream.

Background and descriptions:
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proved to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) for both x86_64 and AArch64.
We can see the PR [1] or the actual patch series [2] for reference.

So since Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share L2 cache
or other mid-level resources, and it is the shared resources that
are used to improve the scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

[1] 
https://lore.kernel.org/lkml/163572864855.3357115.17938524897008353101.tglx@xen13/
[2] https://lkml.org/lkml/2021/9/24/178

In virtualization, on hosts which have physical clusters, we
can design a vCPU topology with a cluster level for the guest kernel
and apply dedicated vCPU pinning. A Cluster-Aware guest kernel can then
make use of the cache affinity of CPU clusters to gain similar
scheduling performance. This series only enables clusters support
in the vCPU topology on ARM virt machines. We can also enable it
for other machine types in the future if needed.

The patches in this series do:
- Enable CPU cluster support on ARM virt machines, so that users
  can specify a 4-level CPU hierarchy sockets/clusters/cores/threads.
  And the 4-level topology will be described to guest kernel through
  ACPI PPTT and DT cpu-map.

Changelog:
v6->v7:
- dropped the introduced ARM specific build_pptt() and extend the generic one
- added A-bs/R-bs from Andrew and Ani, thanks.
- v6: https://patchew.org/QEMU/20220103084636.2496-1-wangyana...@huawei.com/

v5->v6:
- dropped the generic part which is in upstream now
- rebased on latest master
- v5: https://patchew.org/QEMU/20211228092221.21068-1-wangyana...@huawei.com/

Yanan Wang (6):
  hw/arm/virt: Support CPU cluster on ARM virt machine
  hw/arm/virt: Support cluster level in DT cpu-map
  hw/acpi/aml-build: Improve scalability of PPTT generation
  tests/acpi/bios-tables-test: Allow changes to virt/PPTT file
  hw/acpi/aml-build: Support cluster level in PPTT generation
  tests/acpi/bios-table-test: Update expected virt/PPTT file

 hw/acpi/aml-build.c   |  68 --
 hw/arm/virt.c |  16 +
 qemu-options.hx   |  10 ++
 tests/data/acpi/virt/PPTT | Bin 76 -> 96 bytes
 4 files changed, 69 insertions(+), 25 deletions(-)

-- 
2.27.0




[PATCH v6 6/7] hw/arm/virt-acpi-build: Support cluster level in PPTT generation

2022-01-03 Thread Yanan Wang via
Support cluster level in generation of ACPI Processor Properties
Topology Table (PPTT) for ARM virt machines.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt-acpi-build.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 3ce7680393..5f91969688 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -840,6 +840,21 @@ build_pptt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 0, socket, NULL, 0);
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int cluster;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
+build_processor_hierarchy_node(
+table_data,
+(0 << 0), /* not a physical package */
+father_offset, cluster, NULL, 0);
+}
+}
+
 length = g_queue_get_length(list);
 for (i = 0; i < length; i++) {
 int core;
-- 
2.27.0




[PATCH v6 5/7] tests/acpi/bios-tables-test: Allow changes to virt/PPTT file

2022-01-03 Thread Yanan Wang via
List test/data/acpi/virt/PPTT as the expected files allowed to
be changed in tests/qtest/bios-tables-test-allowed-diff.h

Signed-off-by: Yanan Wang 
---
 tests/qtest/bios-tables-test-allowed-diff.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/qtest/bios-tables-test-allowed-diff.h 
b/tests/qtest/bios-tables-test-allowed-diff.h
index dfb8523c8b..cb143a55a6 100644
--- a/tests/qtest/bios-tables-test-allowed-diff.h
+++ b/tests/qtest/bios-tables-test-allowed-diff.h
@@ -1 +1,2 @@
 /* List of comma-separated changed AML files to ignore */
+"tests/data/acpi/virt/PPTT",
-- 
2.27.0




[PATCH v6 7/7] tests/acpi/bios-table-test: Update expected virt/PPTT file

2022-01-03 Thread Yanan Wang via
Run ./tests/data/acpi/rebuild-expected-aml.sh from build directory
to update PPTT binary. Also empty bios-tables-test-allowed-diff.h.

The disassembled differences between actual and expected PPTT:

 /*
  * Intel ACPI Component Architecture
  * AML/ASL+ Disassembler version 20180810 (64-bit version)
  * Copyright (c) 2000 - 2018 Intel Corporation
  *
- * Disassembly of tests/data/acpi/virt/PPTT, Mon Oct 25 20:24:53 2021
+ * Disassembly of /tmp/aml-BPI5B1, Mon Oct 25 20:24:53 2021
  *
  * ACPI Data Table [PPTT]
  *
  * Format: [HexOffset DecimalOffset ByteLength]  FieldName : FieldValue
  */

 [000h    4]Signature : "PPTT"[Processor Properties 
Topology Table]
-[004h 0004   4] Table Length : 004C
+[004h 0004   4] Table Length : 0060
 [008h 0008   1] Revision : 02
-[009h 0009   1] Checksum : A8
+[009h 0009   1] Checksum : 48
 [00Ah 0010   6]   Oem ID : "BOCHS "
 [010h 0016   8] Oem Table ID : "BXPC"
 [018h 0024   4] Oem Revision : 0001
 [01Ch 0028   4]  Asl Compiler ID : "BXPC"
 [020h 0032   4]Asl Compiler Revision : 0001

 [024h 0036   1]Subtable Type : 00 [Processor Hierarchy Node]
 [025h 0037   1]   Length : 14
 [026h 0038   2] Reserved : 
 [028h 0040   4]Flags (decoded below) : 0001
 Physical package : 1
  ACPI Processor ID valid : 0
 [02Ch 0044   4]   Parent : 
 [030h 0048   4]ACPI Processor ID : 
 [034h 0052   4]  Private Resource Number : 

 [038h 0056   1]Subtable Type : 00 [Processor Hierarchy Node]
 [039h 0057   1]   Length : 14
 [03Ah 0058   2] Reserved : 
-[03Ch 0060   4]Flags (decoded below) : 000A
+[03Ch 0060   4]Flags (decoded below) : 
 Physical package : 0
- ACPI Processor ID valid : 1
+ ACPI Processor ID valid : 0
 [040h 0064   4]   Parent : 0024
 [044h 0068   4]ACPI Processor ID : 
 [048h 0072   4]  Private Resource Number : 

-Raw Table Data: Length 76 (0x4C)
+[04Ch 0076   1]Subtable Type : 00 [Processor Hierarchy Node]
+[04Dh 0077   1]   Length : 14
+[04Eh 0078   2] Reserved : 
+[050h 0080   4]Flags (decoded below) : 000A
+Physical package : 0
+ ACPI Processor ID valid : 1
+[054h 0084   4]   Parent : 0038
+[058h 0088   4]ACPI Processor ID : 
+[05Ch 0092   4]  Private Resource Number : 
+
+Raw Table Data: Length 96 (0x60)

-: 50 50 54 54 4C 00 00 00 02 A8 42 4F 43 48 53 20  // PPTTL.BOCHS
+: 50 50 54 54 60 00 00 00 02 48 42 4F 43 48 53 20  // PPTT`HBOCHS
 0010: 42 58 50 43 20 20 20 20 01 00 00 00 42 58 50 43  // BXPCBXPC
 0020: 01 00 00 00 00 14 00 00 01 00 00 00 00 00 00 00  // 
-0030: 00 00 00 00 00 00 00 00 00 14 00 00 0A 00 00 00  // 
-0040: 24 00 00 00 00 00 00 00 00 00 00 00  // $...
+0030: 00 00 00 00 00 00 00 00 00 14 00 00 00 00 00 00  // 
+0040: 24 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00  // $...
+0050: 0A 00 00 00 38 00 00 00 00 00 00 00 00 00 00 00  // 8...

Signed-off-by: Yanan Wang 
---
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 tests/qtest/bios-tables-test-allowed-diff.h |   1 -
 2 files changed, 1 deletion(-)

diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT
index 
7a1258ecf123555b24462c98ccbb76b4ac1d0c2b..f56ea63b369a604877374ad696c396e796ab1c83
 100644
GIT binary patch
delta 53
zcmV-50LuSNU

[PATCH v6 2/7] hw/arm/virt: Support cluster level in DT cpu-map

2022-01-03 Thread Yanan Wang via
Support one cluster level between core and physical package in the
cpu-map of Arm/virt devicetree. This is also consistent with Linux
Doc "Documentation/devicetree/bindings/cpu/cpu-topology.txt".

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index f413e146d9..fc5eea8c8c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -430,9 +430,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
  * can contain several layers of clustering within a single physical
  * package and cluster nodes can be contained in parent cluster nodes.
  *
- * Given that cluster is not yet supported in the vCPU topology,
- * we currently generate one cluster node within each socket node
- * by default.
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
  */
 qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
 
@@ -442,14 +441,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 
 if (ms->smp.threads > 1) {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
-cpu / (ms->smp.cores * ms->smp.threads),
+"/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+(cpu / (ms->smp.cores * ms->smp.threads)) % 
ms->smp.clusters,
 (cpu / ms->smp.threads) % ms->smp.cores,
 cpu % ms->smp.threads);
 } else {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d",
-cpu / ms->smp.cores,
+"/cpus/cpu-map/socket%d/cluster%d/core%d",
+cpu / (ms->smp.clusters * ms->smp.cores),
+(cpu / ms->smp.cores) % ms->smp.clusters,
 cpu % ms->smp.cores);
 }
 qemu_fdt_add_path(ms->fdt, map_path);
-- 
2.27.0




[PATCH v6 3/7] hw/acpi/aml-build: Improve scalability of PPTT generation

2022-01-03 Thread Yanan Wang via
Currently we generate a PPTT table of an n-level processor hierarchy
with n levels of nested loops in build_pptt(). This works fine for now,
as there are only three CPU topology parameters. But the code may become
less scalable as the number of processor hierarchy levels increases.

This patch only improves the scalability of build_pptt by reducing
the loops, and intends to make no functional change.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 50 +
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b3b3310df3..be3851be36 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,7 +2001,10 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
 const char *oem_id, const char *oem_table_id)
 {
-int pptt_start = table_data->len;
+GQueue *list = g_queue_new();
+guint pptt_start = table_data->len;
+guint father_offset;
+guint length, i;
 int uid = 0;
 int socket;
 AcpiTable table = { .sig = "PPTT", .rev = 2,
@@ -2010,9 +2013,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
 acpi_table_begin(, table_data);
 
 for (socket = 0; socket < ms->smp.sockets; socket++) {
-uint32_t socket_offset = table_data->len - pptt_start;
-int core;
-
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 /*
@@ -2021,35 +2023,47 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  */
 (1 << 0),
 0, socket, NULL, 0);
+}
 
-for (core = 0; core < ms->smp.cores; core++) {
-uint32_t core_offset = table_data->len - pptt_start;
-int thread;
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int core;
 
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (core = 0; core < ms->smp.cores; core++) {
 if (ms->smp.threads > 1) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 (0 << 0), /* not a physical package */
-socket_offset, core, NULL, 0);
-
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-core_offset, uid++, NULL, 0);
-}
+father_offset, core, NULL, 0);
 } else {
 build_processor_hierarchy_node(
 table_data,
 (1 << 1) | /* ACPI Processor ID valid */
 (1 << 3),  /* Node is a Leaf */
-socket_offset, uid++, NULL, 0);
+father_offset, uid++, NULL, 0);
 }
 }
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int thread;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (thread = 0; thread < ms->smp.threads; thread++) {
+build_processor_hierarchy_node(
+table_data,
+(1 << 1) | /* ACPI Processor ID valid */
+(1 << 2) | /* Processor is a Thread */
+(1 << 3),  /* Node is a Leaf */
+father_offset, uid++, NULL, 0);
+}
+}
+
+g_queue_free(list);
 acpi_table_end(linker, );
 }
 
-- 
2.27.0




[PATCH v6 0/7] ARM virt: Support CPU cluster topology

2022-01-03 Thread Yanan Wang via
This v6 series enables the support for CPU cluster topology on
ARM virt machines. The generic infrastructure for the CPU cluster
parameter is now upstream.

Background and descriptions:
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proved to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) for both x86_64 and AArch64.
We can see the PR [1] or the actual patch series [2] for reference.

So since Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share L2 cache
or other mid-level resources, and it is the shared resources that
are used to improve the scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

[1] 
https://lore.kernel.org/lkml/163572864855.3357115.17938524897008353101.tglx@xen13/
[2] https://lkml.org/lkml/2021/9/24/178

In virtualization, on hosts which have physical clusters, we
can design a vCPU topology with a cluster level for the guest kernel
and apply dedicated vCPU pinning. A Cluster-Aware guest kernel can then
make use of the cache affinity of CPU clusters to gain similar
scheduling performance. This series only enables clusters support
in the vCPU topology on ARM virt machines. We can also enable it
for other machine types in the future if needed.

The patches in this series do:
- Enable CPU cluster support on ARM virt machines, so that users
  can specify a 4-level CPU hierarchy sockets/clusters/cores/threads.
  And the 4-level topology will be described to guest kernel through
  ACPI PPTT and DT cpu-map.

Changelog:
v5->v6:
- drop the generic part which is in upstream now
- rebased on latest master
- v5: https://patchew.org/QEMU/20211228092221.21068-1-wangyana...@huawei.com/

Yanan Wang (7):
  hw/arm/virt: Support CPU cluster on ARM virt machine
  hw/arm/virt: Support cluster level in DT cpu-map
  hw/acpi/aml-build: Improve scalability of PPTT generation
  hw/arm/virt-acpi-build: Make an ARM specific PPTT generator
  tests/acpi/bios-tables-test: Allow changes to virt/PPTT file
  hw/arm/virt-acpi-build: Support cluster level in PPTT generation
  tests/acpi/bios-table-test: Update expected virt/PPTT file

 hw/acpi/aml-build.c |  66 ++
 hw/arm/virt-acpi-build.c|  92 +++-
 hw/arm/virt.c   |  16 ---
 include/hw/acpi/aml-build.h |   5 +-
 qemu-options.hx |  10 
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 6 files changed, 115 insertions(+), 74 deletions(-)

--
2.27.0




[PATCH v6 4/7] hw/arm/virt-acpi-build: Make an ARM specific PPTT generator

2022-01-03 Thread Yanan Wang via
We have a generic build_pptt() in hw/acpi/aml-build.c but it's
currently only used in ARM acpi initialization. Now we are going
to support the new CPU cluster parameter, which is currently only
supported by ARM, so it won't be a very good idea to add it to the
generic build_pptt() as it would make the code complex and hard
to maintain, especially when we also support the CPU cache topology
hierarchy in build_pptt() too. Note that the cache topology
design also varies between different CPU targets.

So an ARM specific PPTT generator becomes necessary now. Given
that the generic one is currently only used by ARM, let's just
move build_pptt() from aml-build.c to virt-acpi-build.c with
minor update.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 80 ++---
 hw/arm/virt-acpi-build.c| 77 ++-
 include/hw/acpi/aml-build.h |  5 ++-
 3 files changed, 81 insertions(+), 81 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index be3851be36..040fbc9b4b 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1968,10 +1968,9 @@ void build_slit(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  * ACPI spec, Revision 6.3
  * 5.2.29.1 Processor hierarchy node structure (Type 0)
  */
-static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
-   uint32_t parent, uint32_t id,
-   uint32_t *priv_rsrc,
-   uint32_t priv_num)
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+uint32_t parent, uint32_t id,
+uint32_t *priv_rsrc, uint32_t priv_num)
 {
 int i;
 
@@ -1994,79 +1993,6 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 }
 }
 
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29 Processor Properties Topology Table (PPTT)
- */
-void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
-const char *oem_id, const char *oem_table_id)
-{
-GQueue *list = g_queue_new();
-guint pptt_start = table_data->len;
-guint father_offset;
-guint length, i;
-int uid = 0;
-int socket;
-AcpiTable table = { .sig = "PPTT", .rev = 2,
-.oem_id = oem_id, .oem_table_id = oem_table_id };
-
-acpi_table_begin(, table_data);
-
-for (socket = 0; socket < ms->smp.sockets; socket++) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-/*
- * Physical package - represents the boundary
- * of a physical package
- */
-(1 << 0),
-0, socket, NULL, 0);
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int core;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (core = 0; core < ms->smp.cores; core++) {
-if (ms->smp.threads > 1) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-(0 << 0), /* not a physical package */
-father_offset, core, NULL, 0);
-} else {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int thread;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-
-g_queue_free(list);
-acpi_table_end(linker, );
-}
-
 /* build rev1/rev3/rev5.1 FADT */
 void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
 const char *oem_id, const char *oem_table_id)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index d0f4867fdf..3ce7680393 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -808,6 +808,80 @@ build_madt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 acpi_table_end(linker, );
 }
 
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+static void
+build_pptt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
+{
+

[PATCH v6 1/7] hw/arm/virt: Support CPU cluster on ARM virt machine

2022-01-03 Thread Yanan Wang via
ARM64 machines like Kunpeng Family Server Chips have a level
of hardware topology in which a group of CPU cores share L3
cache tag or L2 cache. For example, Kunpeng 920 typically
has 6 or 8 clusters in each NUMA node (also represent range
of CPU die), and each cluster has 4 CPU cores. All clusters
share L3 cache data, but CPU cores in each cluster share a
local L3 tag.

Running a guest kernel with Cluster-Aware Scheduling on the
Hosts which have physical clusters, if we can design a vCPU
topology with cluster level for guest kernel and then have
a dedicated vCPU pinning, the guest will gain scheduling
performance improvement from cache affinity of CPU cluster.

So let's enable the support for this new parameter on ARM
virt machines. After this patch, we can define a 4-level
CPU hierarchy like: cpus=*,maxcpus=*,sockets=*,clusters=*,
cores=*,threads=*.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c   |  1 +
 qemu-options.hx | 10 ++
 2 files changed, 11 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 6bce595aba..f413e146d9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2700,6 +2700,7 @@ static void virt_machine_class_init(ObjectClass *oc, void 
*data)
 hc->unplug_request = virt_machine_device_unplug_request_cb;
 hc->unplug = virt_machine_device_unplug_cb;
 mc->nvdimm_supported = true;
+mc->smp_props.clusters_supported = true;
 mc->auto_enable_numa_with_memhp = true;
 mc->auto_enable_numa_with_memdev = true;
 mc->default_ram_id = "mach-virt.ram";
diff --git a/qemu-options.hx b/qemu-options.hx
index fd1f8135fb..69ef1cdb85 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -277,6 +277,16 @@ SRST
 
 -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16
 
+The following sub-option defines a CPU topology hierarchy (2 sockets
+totally on the machine, 2 clusters per socket, 2 cores per cluster,
+2 threads per core) for ARM virt machines which support sockets/clusters
+/cores/threads. Some members of the option can be omitted but their values
+will be automatically computed:
+
+::
+
+-smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16
+
 Historically preference was given to the coarsest topology parameters
 when computing missing values (ie sockets preferred over cores, which
 were preferred over threads), however, this behaviour is considered
-- 
2.27.0




[PATCH v5 12/14] tests/acpi/bios-tables-test: Allow changes to virt/PPTT file

2021-12-28 Thread Yanan Wang via
List tests/data/acpi/virt/PPTT as an expected file allowed to
be changed in tests/qtest/bios-tables-test-allowed-diff.h

Signed-off-by: Yanan Wang 
---
 tests/qtest/bios-tables-test-allowed-diff.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/qtest/bios-tables-test-allowed-diff.h 
b/tests/qtest/bios-tables-test-allowed-diff.h
index dfb8523c8b..cb143a55a6 100644
--- a/tests/qtest/bios-tables-test-allowed-diff.h
+++ b/tests/qtest/bios-tables-test-allowed-diff.h
@@ -1 +1,2 @@
 /* List of comma-separated changed AML files to ignore */
+"tests/data/acpi/virt/PPTT",
-- 
2.27.0




[PATCH v5 14/14] tests/acpi/bios-table-test: Update expected virt/PPTT file

2021-12-28 Thread Yanan Wang via
Run ./tests/data/acpi/rebuild-expected-aml.sh from build directory
to update PPTT binary. Also empty bios-tables-test-allowed-diff.h.

The disassembled differences between actual and expected PPTT:

 /*
  * Intel ACPI Component Architecture
  * AML/ASL+ Disassembler version 20180810 (64-bit version)
  * Copyright (c) 2000 - 2018 Intel Corporation
  *
- * Disassembly of tests/data/acpi/virt/PPTT, Mon Oct 25 20:24:53 2021
+ * Disassembly of /tmp/aml-BPI5B1, Mon Oct 25 20:24:53 2021
  *
  * ACPI Data Table [PPTT]
  *
  * Format: [HexOffset DecimalOffset ByteLength]  FieldName : FieldValue
  */

 [000h    4]Signature : "PPTT"[Processor Properties 
Topology Table]
-[004h 0004   4] Table Length : 004C
+[004h 0004   4] Table Length : 0060
 [008h 0008   1] Revision : 02
-[009h 0009   1] Checksum : A8
+[009h 0009   1] Checksum : 48
 [00Ah 0010   6]   Oem ID : "BOCHS "
 [010h 0016   8] Oem Table ID : "BXPC"
 [018h 0024   4] Oem Revision : 0001
 [01Ch 0028   4]  Asl Compiler ID : "BXPC"
 [020h 0032   4]Asl Compiler Revision : 0001

 [024h 0036   1]Subtable Type : 00 [Processor Hierarchy Node]
 [025h 0037   1]   Length : 14
 [026h 0038   2] Reserved : 
 [028h 0040   4]Flags (decoded below) : 0001
 Physical package : 1
  ACPI Processor ID valid : 0
 [02Ch 0044   4]   Parent : 
 [030h 0048   4]ACPI Processor ID : 
 [034h 0052   4]  Private Resource Number : 

 [038h 0056   1]Subtable Type : 00 [Processor Hierarchy Node]
 [039h 0057   1]   Length : 14
 [03Ah 0058   2] Reserved : 
-[03Ch 0060   4]Flags (decoded below) : 000A
+[03Ch 0060   4]Flags (decoded below) : 
 Physical package : 0
- ACPI Processor ID valid : 1
+ ACPI Processor ID valid : 0
 [040h 0064   4]   Parent : 0024
 [044h 0068   4]ACPI Processor ID : 
 [048h 0072   4]  Private Resource Number : 

-Raw Table Data: Length 76 (0x4C)
+[04Ch 0076   1]Subtable Type : 00 [Processor Hierarchy Node]
+[04Dh 0077   1]   Length : 14
+[04Eh 0078   2] Reserved : 
+[050h 0080   4]Flags (decoded below) : 000A
+Physical package : 0
+ ACPI Processor ID valid : 1
+[054h 0084   4]   Parent : 0038
+[058h 0088   4]ACPI Processor ID : 
+[05Ch 0092   4]  Private Resource Number : 
+
+Raw Table Data: Length 96 (0x60)

-: 50 50 54 54 4C 00 00 00 02 A8 42 4F 43 48 53 20  // PPTTL.BOCHS
+: 50 50 54 54 60 00 00 00 02 48 42 4F 43 48 53 20  // PPTT`HBOCHS
 0010: 42 58 50 43 20 20 20 20 01 00 00 00 42 58 50 43  // BXPCBXPC
 0020: 01 00 00 00 00 14 00 00 01 00 00 00 00 00 00 00  // 
-0030: 00 00 00 00 00 00 00 00 00 14 00 00 0A 00 00 00  // 
-0040: 24 00 00 00 00 00 00 00 00 00 00 00  // $...
+0030: 00 00 00 00 00 00 00 00 00 14 00 00 00 00 00 00  // 
+0040: 24 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00  // $...
+0050: 0A 00 00 00 38 00 00 00 00 00 00 00 00 00 00 00  // 8...

Signed-off-by: Yanan Wang 
---
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 tests/qtest/bios-tables-test-allowed-diff.h |   1 -
 2 files changed, 1 deletion(-)

diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT
index 
7a1258ecf123555b24462c98ccbb76b4ac1d0c2b..f56ea63b369a604877374ad696c396e796ab1c83
 100644
GIT binary patch
delta 53
zcmV-50LuSNU

[PATCH v5 13/14] hw/arm/virt-acpi-build: Support cluster level in PPTT generation

2021-12-28 Thread Yanan Wang via
Support cluster level in generation of ACPI Processor Properties
Topology Table (PPTT) for ARM virt machines.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt-acpi-build.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 3ce7680393..5f91969688 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -840,6 +840,21 @@ build_pptt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 0, socket, NULL, 0);
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int cluster;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
+build_processor_hierarchy_node(
+table_data,
+(0 << 0), /* not a physical package */
+father_offset, cluster, NULL, 0);
+}
+}
+
 length = g_queue_get_length(list);
 for (i = 0; i < length; i++) {
 int core;
-- 
2.27.0




[PATCH v5 11/14] hw/arm/virt-acpi-build: Make an ARM specific PPTT generator

2021-12-28 Thread Yanan Wang via
We have a generic build_pptt() in hw/acpi/aml-build.c but it's
currently only used in ARM acpi initialization. Now we are going
to support the new CPU cluster parameter which is currently only
supported by ARM, so it won't be a very good idea to add it to the
generic build_pptt() as it will make the code complex and hard
to maintain especially when we also support CPU cache topology
hierarchy in build_pptt() too. Note that the cache topology
design also varies between different CPU targets.

So an ARM specific PPTT generator becomes necessary now. Given
that the generic one is currently only used by ARM, let's just
move build_pptt() from aml-build.c to virt-acpi-build.c with
minor update.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 80 ++---
 hw/arm/virt-acpi-build.c| 77 ++-
 include/hw/acpi/aml-build.h |  5 ++-
 3 files changed, 81 insertions(+), 81 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index be3851be36..040fbc9b4b 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1968,10 +1968,9 @@ void build_slit(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  * ACPI spec, Revision 6.3
  * 5.2.29.1 Processor hierarchy node structure (Type 0)
  */
-static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
-   uint32_t parent, uint32_t id,
-   uint32_t *priv_rsrc,
-   uint32_t priv_num)
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+uint32_t parent, uint32_t id,
+uint32_t *priv_rsrc, uint32_t priv_num)
 {
 int i;
 
@@ -1994,79 +1993,6 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 }
 }
 
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29 Processor Properties Topology Table (PPTT)
- */
-void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
-const char *oem_id, const char *oem_table_id)
-{
-GQueue *list = g_queue_new();
-guint pptt_start = table_data->len;
-guint father_offset;
-guint length, i;
-int uid = 0;
-int socket;
-AcpiTable table = { .sig = "PPTT", .rev = 2,
-.oem_id = oem_id, .oem_table_id = oem_table_id };
-
-acpi_table_begin(, table_data);
-
-for (socket = 0; socket < ms->smp.sockets; socket++) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-/*
- * Physical package - represents the boundary
- * of a physical package
- */
-(1 << 0),
-0, socket, NULL, 0);
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int core;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (core = 0; core < ms->smp.cores; core++) {
-if (ms->smp.threads > 1) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-(0 << 0), /* not a physical package */
-father_offset, core, NULL, 0);
-} else {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int thread;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-
-g_queue_free(list);
-acpi_table_end(linker, );
-}
-
 /* build rev1/rev3/rev5.1 FADT */
 void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
 const char *oem_id, const char *oem_table_id)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index d0f4867fdf..3ce7680393 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -808,6 +808,80 @@ build_madt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 acpi_table_end(linker, );
 }
 
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+static void
+build_pptt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
+{
+

[PATCH v5 08/14] hw/arm/virt: Support clusters on ARM virt machines

2021-12-28 Thread Yanan Wang via
In implementations of ARM64 architecture, at most there could be
a CPU topology hierarchy like "sockets/dies/clusters/cores/threads"
defined. For example, some ARM64 server chip Kunpeng 920 totally
has 2 sockets, 2 NUMA nodes (also represent CPU dies range) in each
socket, 6 clusters in each NUMA node, 4 CPU cores in each cluster.

Clusters within the same NUMA share the L3 cache data and cores
within the same cluster share a L2 cache and a L3 cache tag.
Given that designing a vCPU topology with cluster level for the
guest can gain scheduling performance improvement, let's support
this new parameter on ARM virt machines.

After this, we can define a 4-level CPU topology hierarchy like:
cpus=*,maxcpus=*,sockets=*,clusters=*,cores=*,threads=*.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c   |  1 +
 qemu-options.hx | 10 ++
 2 files changed, 11 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 6bce595aba..f413e146d9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2700,6 +2700,7 @@ static void virt_machine_class_init(ObjectClass *oc, void 
*data)
 hc->unplug_request = virt_machine_device_unplug_request_cb;
 hc->unplug = virt_machine_device_unplug_cb;
 mc->nvdimm_supported = true;
+mc->smp_props.clusters_supported = true;
 mc->auto_enable_numa_with_memhp = true;
 mc->auto_enable_numa_with_memdev = true;
 mc->default_ram_id = "mach-virt.ram";
diff --git a/qemu-options.hx b/qemu-options.hx
index fd1f8135fb..69ef1cdb85 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -277,6 +277,16 @@ SRST
 
 -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16
 
+The following sub-option defines a CPU topology hierarchy (2 sockets
+totally on the machine, 2 clusters per socket, 2 cores per cluster,
+2 threads per core) for ARM virt machines which support sockets/clusters
+/cores/threads. Some members of the option can be omitted but their values
+will be automatically computed:
+
+::
+
+-smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16
+
 Historically preference was given to the coarsest topology parameters
 when computing missing values (ie sockets preferred over cores, which
 were preferred over threads), however, this behaviour is considered
-- 
2.27.0




[PATCH v5 10/14] hw/acpi/aml-build: Improve scalability of PPTT generation

2021-12-28 Thread Yanan Wang via
Currently we generate a PPTT table of n-level processor hierarchy
with n-level loops in build_pptt(). It works fine as now there are
only three CPU topology parameters. But the code may become less
scalable with the processor hierarchy levels increasing.

This patch only improves the scalability of build_pptt by reducing
the loops, and intends to make no functional change.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 50 +
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b3b3310df3..be3851be36 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,7 +2001,10 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
 const char *oem_id, const char *oem_table_id)
 {
-int pptt_start = table_data->len;
+GQueue *list = g_queue_new();
+guint pptt_start = table_data->len;
+guint father_offset;
+guint length, i;
 int uid = 0;
 int socket;
 AcpiTable table = { .sig = "PPTT", .rev = 2,
@@ -2010,9 +2013,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
 acpi_table_begin(, table_data);
 
 for (socket = 0; socket < ms->smp.sockets; socket++) {
-uint32_t socket_offset = table_data->len - pptt_start;
-int core;
-
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 /*
@@ -2021,35 +2023,47 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  */
 (1 << 0),
 0, socket, NULL, 0);
+}
 
-for (core = 0; core < ms->smp.cores; core++) {
-uint32_t core_offset = table_data->len - pptt_start;
-int thread;
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int core;
 
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (core = 0; core < ms->smp.cores; core++) {
 if (ms->smp.threads > 1) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 (0 << 0), /* not a physical package */
-socket_offset, core, NULL, 0);
-
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-core_offset, uid++, NULL, 0);
-}
+father_offset, core, NULL, 0);
 } else {
 build_processor_hierarchy_node(
 table_data,
 (1 << 1) | /* ACPI Processor ID valid */
 (1 << 3),  /* Node is a Leaf */
-socket_offset, uid++, NULL, 0);
+father_offset, uid++, NULL, 0);
 }
 }
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int thread;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (thread = 0; thread < ms->smp.threads; thread++) {
+build_processor_hierarchy_node(
+table_data,
+(1 << 1) | /* ACPI Processor ID valid */
+(1 << 2) | /* Processor is a Thread */
+(1 << 3),  /* Node is a Leaf */
+father_offset, uid++, NULL, 0);
+}
+}
+
+g_queue_free(list);
 acpi_table_end(linker, );
 }
 
-- 
2.27.0




[PATCH v5 05/14] tests/unit/test-smp-parse: No need to explicitly zero MachineClass members

2021-12-28 Thread Yanan Wang via
The default value of the MachineClass members is 0, which
means we don't have to explicitly zero them. Also the value
of "mc->smp_props.prefer_sockets" will be taken care of by
smp_parse_test(), we don't necessarily need the statement
in machine_base_class_init() either.

Signed-off-by: Yanan Wang 
---
 tests/unit/test-smp-parse.c | 10 --
 1 file changed, 10 deletions(-)

diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c
index 331719bbc4..72d83d1bbc 100644
--- a/tests/unit/test-smp-parse.c
+++ b/tests/unit/test-smp-parse.c
@@ -523,8 +523,6 @@ static void machine_base_class_init(ObjectClass *oc, void 
*data)
 {
 MachineClass *mc = MACHINE_CLASS(oc);
 
-mc->smp_props.prefer_sockets = true;
-
 mc->name = g_strdup(SMP_MACHINE_NAME);
 }
 
@@ -534,9 +532,6 @@ static void machine_generic_valid_class_init(ObjectClass 
*oc, void *data)
 
 mc->min_cpus = MIN_CPUS;
 mc->max_cpus = MAX_CPUS;
-
-mc->smp_props.dies_supported = false;
-mc->smp_props.clusters_supported = false;
 }
 
 static void machine_generic_invalid_class_init(ObjectClass *oc, void *data)
@@ -546,9 +541,6 @@ static void machine_generic_invalid_class_init(ObjectClass 
*oc, void *data)
 /* Force invalid min CPUs and max CPUs */
 mc->min_cpus = 2;
 mc->max_cpus = 511;
-
-mc->smp_props.dies_supported = false;
-mc->smp_props.clusters_supported = false;
 }
 
 static void machine_with_dies_class_init(ObjectClass *oc, void *data)
@@ -559,7 +551,6 @@ static void machine_with_dies_class_init(ObjectClass *oc, 
void *data)
 mc->max_cpus = MAX_CPUS;
 
 mc->smp_props.dies_supported = true;
-mc->smp_props.clusters_supported = false;
 }
 
 static void machine_with_clusters_class_init(ObjectClass *oc, void *data)
@@ -570,7 +561,6 @@ static void machine_with_clusters_class_init(ObjectClass 
*oc, void *data)
 mc->max_cpus = MAX_CPUS;
 
 mc->smp_props.clusters_supported = true;
-mc->smp_props.dies_supported = false;
 }
 
 static void test_generic_valid(const void *opaque)
-- 
2.27.0




[PATCH v5 07/14] MAINTAINERS: Self-recommended as reviewer of "Machine core"

2021-12-28 Thread Yanan Wang via
I've built interest in the generic machine subsystem and
have also been working on projects related to this part, so
I recommend myself as a reviewer so that I can help to
review some patches familiar to me, and have a chance to
keep learning continuously.

Signed-off-by: Yanan Wang 
---
 MAINTAINERS | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 5456536805..fe5eea76f6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1631,6 +1631,7 @@ Machine core
 M: Eduardo Habkost 
 M: Marcel Apfelbaum 
 R: Philippe Mathieu-Daudé 
+R: Yanan Wang 
 S: Supported
 F: cpu.c
 F: hw/core/cpu.c
-- 
2.27.0




[PATCH v5 09/14] hw/arm/virt: Support cluster level in DT cpu-map

2021-12-28 Thread Yanan Wang via
Support one cluster level between core and physical package in the
cpu-map of Arm/virt devicetree. This is also consistent with Linux
Doc "Documentation/devicetree/bindings/cpu/cpu-topology.txt".

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index f413e146d9..fc5eea8c8c 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -430,9 +430,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
  * can contain several layers of clustering within a single physical
  * package and cluster nodes can be contained in parent cluster nodes.
  *
- * Given that cluster is not yet supported in the vCPU topology,
- * we currently generate one cluster node within each socket node
- * by default.
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
  */
 qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
 
@@ -442,14 +441,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 
 if (ms->smp.threads > 1) {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
-cpu / (ms->smp.cores * ms->smp.threads),
+"/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+(cpu / (ms->smp.cores * ms->smp.threads)) % 
ms->smp.clusters,
 (cpu / ms->smp.threads) % ms->smp.cores,
 cpu % ms->smp.threads);
 } else {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d",
-cpu / ms->smp.cores,
+"/cpus/cpu-map/socket%d/cluster%d/core%d",
+cpu / (ms->smp.clusters * ms->smp.cores),
+(cpu / ms->smp.cores) % ms->smp.clusters,
 cpu % ms->smp.cores);
 }
 qemu_fdt_add_path(ms->fdt, map_path);
-- 
2.27.0




[PATCH v5 06/14] tests/unit/test-smp-parse: Keep default MIN/MAX CPUs in machine_base_class_init

2021-12-28 Thread Yanan Wang via
Most machine types in test-smp-parse will be OK to have the default
MIN/MAX CPUs except "smp-generic-invalid", let's keep the default
values in machine_base_class_init which will be inherited. And if
we hope a different value for a specific machine, modify it in its
own initialization function.

Signed-off-by: Yanan Wang 
---
 tests/unit/test-smp-parse.c | 16 ++--
 1 file changed, 2 insertions(+), 14 deletions(-)

diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c
index 72d83d1bbc..fdc39a846c 100644
--- a/tests/unit/test-smp-parse.c
+++ b/tests/unit/test-smp-parse.c
@@ -523,15 +523,10 @@ static void machine_base_class_init(ObjectClass *oc, void 
*data)
 {
 MachineClass *mc = MACHINE_CLASS(oc);
 
-mc->name = g_strdup(SMP_MACHINE_NAME);
-}
-
-static void machine_generic_valid_class_init(ObjectClass *oc, void *data)
-{
-MachineClass *mc = MACHINE_CLASS(oc);
-
 mc->min_cpus = MIN_CPUS;
 mc->max_cpus = MAX_CPUS;
+
+mc->name = g_strdup(SMP_MACHINE_NAME);
 }
 
 static void machine_generic_invalid_class_init(ObjectClass *oc, void *data)
@@ -547,9 +542,6 @@ static void machine_with_dies_class_init(ObjectClass *oc, 
void *data)
 {
 MachineClass *mc = MACHINE_CLASS(oc);
 
-mc->min_cpus = MIN_CPUS;
-mc->max_cpus = MAX_CPUS;
-
 mc->smp_props.dies_supported = true;
 }
 
@@ -557,9 +549,6 @@ static void machine_with_clusters_class_init(ObjectClass 
*oc, void *data)
 {
 MachineClass *mc = MACHINE_CLASS(oc);
 
-mc->min_cpus = MIN_CPUS;
-mc->max_cpus = MAX_CPUS;
-
 mc->smp_props.clusters_supported = true;
 }
 
@@ -718,7 +707,6 @@ static const TypeInfo smp_machine_types[] = {
 }, {
 .name   = MACHINE_TYPE_NAME("smp-generic-valid"),
 .parent = TYPE_MACHINE,
-.class_init = machine_generic_valid_class_init,
 }, {
 .name   = MACHINE_TYPE_NAME("smp-generic-invalid"),
 .parent = TYPE_MACHINE,
-- 
2.27.0




[PATCH v5 03/14] hw/core/machine: Wrap target specific parameters together

2021-12-28 Thread Yanan Wang via
Wrap the CPU target specific parameters together into a single
variable except generic sockets/cores/threads, to make related
code lines shorter and more concise.

No functional change intended.

Signed-off-by: Yanan Wang 
---
 hw/core/machine-smp.c | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index b39ed21e65..4547d7bbdc 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -79,6 +79,7 @@ void machine_parse_smp_config(MachineState *ms,
 unsigned cores   = config->has_cores ? config->cores : 0;
 unsigned threads = config->has_threads ? config->threads : 0;
 unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0;
+unsigned others;
 
 /*
  * Specified CPU topology parameters must be greater than zero,
@@ -111,6 +112,8 @@ void machine_parse_smp_config(MachineState *ms,
 dies = dies > 0 ? dies : 1;
 clusters = clusters > 0 ? clusters : 1;
 
+others = dies * clusters;
+
 /* compute missing values based on the provided ones */
 if (cpus == 0 && maxcpus == 0) {
 sockets = sockets > 0 ? sockets : 1;
@@ -124,30 +127,30 @@ void machine_parse_smp_config(MachineState *ms,
 if (sockets == 0) {
 cores = cores > 0 ? cores : 1;
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * clusters * cores * threads);
+sockets = maxcpus / (cores * threads * others);
 } else if (cores == 0) {
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * clusters * threads);
+cores = maxcpus / (sockets * threads * others);
 }
 } else {
 /* prefer cores over sockets since 6.2 */
 if (cores == 0) {
 sockets = sockets > 0 ? sockets : 1;
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * clusters * threads);
+cores = maxcpus / (sockets * threads * others);
 } else if (sockets == 0) {
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * clusters * cores * threads);
+sockets = maxcpus / (cores * threads * others);
 }
 }
 
 /* try to calculate omitted threads at last */
 if (threads == 0) {
-threads = maxcpus / (sockets * dies * clusters * cores);
+threads = maxcpus / (sockets * cores * others);
 }
 }
 
-maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * clusters * cores * 
threads;
+maxcpus = maxcpus > 0 ? maxcpus : sockets * cores * threads * others;
 cpus = cpus > 0 ? cpus : maxcpus;
 
 ms->smp.cpus = cpus;
@@ -159,7 +162,7 @@ void machine_parse_smp_config(MachineState *ms,
 ms->smp.max_cpus = maxcpus;
 
 /* sanity-check of the computed topology */
-if (sockets * dies * clusters * cores * threads != maxcpus) {
+if (sockets * cores * threads * others != maxcpus) {
 g_autofree char *topo_msg = cpu_hierarchy_to_string(ms);
 error_setg(errp, "Invalid CPU topology: "
"product of the hierarchy must match maxcpus: "
-- 
2.27.0




[PATCH v5 01/14] qemu-options: Improve readability of SMP related Docs

2021-12-28 Thread Yanan Wang via
We have a description in qemu-options.hx for each CPU topology
parameter to explain what it exactly means, and also an extra
declaration for the target-specific one, e.g. "for PC only"
when describing "dies", and "for PC, it's on one die" when
describing "cores".

Now we are going to introduce one more non-generic parameter
"clusters"; it will make the Doc less readable and scalable if we still
continue to use the legacy way to describe it.

So let's at first make two tweaks of the Docs to improve the
readability and also scalability:
1) In the -help text: Delete the extra specific declaration and
   describe each topology parameter level by level. Then add a
   note to declare that different machines may support different
   subsets and the actual meaning of the supported parameters
   will vary accordingly.
2) In the rST text: List all the sub-hierarchies currently
   supported in QEMU, and correspondingly give an example of
   -smp configuration for each of them.

Signed-off-by: Yanan Wang 
---
 qemu-options.hx | 76 ++---
 1 file changed, 59 insertions(+), 17 deletions(-)

diff --git a/qemu-options.hx b/qemu-options.hx
index 7d47510947..b39377de3f 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -207,14 +207,26 @@ ERST
 
 DEF("smp", HAS_ARG, QEMU_OPTION_smp,
 "-smp 
[[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n"
-"set the number of CPUs to 'n' [default=1]\n"
+"set the number of initial CPUs to 'n' [default=1]\n"
 "maxcpus= maximum number of total CPUs, including\n"
 "offline CPUs for hotplug, etc\n"
-"sockets= number of discrete sockets in the system\n"
-"dies= number of CPU dies on one socket (for PC only)\n"
-"cores= number of CPU cores on one socket (for PC, it's on 
one die)\n"
-"threads= number of threads on one CPU core\n",
-QEMU_ARCH_ALL)
+"sockets= number of sockets on the machine board\n"
+"dies= number of dies in one socket\n"
+"cores= number of cores in one die\n"
+"threads= number of threads in one core\n"
+"Note: Different machines may have different subsets of the CPU topology\n"
+"  parameters supported, so the actual meaning of the supported 
parameters\n"
+"  will vary accordingly. For example, for a machine type that 
supports a\n"
+"  three-level CPU hierarchy of sockets/cores/threads, the parameters 
will\n"
+"  sequentially mean as below:\n"
+"sockets means the number of sockets on the machine 
board\n"
+"cores means the number of cores in one socket\n"
+"threads means the number of threads in one core\n"
+"  For a particular machine type board, an expected CPU topology 
hierarchy\n"
+"  can be defined through the supported sub-option. Unsupported 
parameters\n"
+"  can also be provided in addition to the sub-option, but their 
values\n"
+"  must be set as 1 in the purpose of correct parsing.\n",
+QEMU_ARCH_ALL)
 SRST
 ``-smp 
[[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]``
 Simulate a SMP system with '\ ``n``\ ' CPUs initially present on
@@ -225,27 +237,57 @@ SRST
 initial CPU count will match the maximum number. When only one of them
 is given then the omitted one will be set to its counterpart's value.
 Both parameters may be specified, but the maximum number of CPUs must
-be equal to or greater than the initial CPU count. Both parameters are
-subject to an upper limit that is determined by the specific machine
-type chosen.
-
-To control reporting of CPU topology information, the number of sockets,
-dies per socket, cores per die, and threads per core can be specified.
-The sum `` sockets * cores * dies * threads `` must be equal to the
-maximum CPU count. CPU targets may only support a subset of the topology
-parameters. Where a CPU target does not support use of a particular
-topology parameter, its value should be assumed to be 1 for the purpose
-of computing the CPU maximum count.
+be equal to or greater than the initial CPU count. Product of the
+CPU topology hierarchy must be equal to the maximum number of CPUs.
+Both parameters are subject to an upper limit that is determined by
+the specific machine type chosen.
+
+To control reporting of CPU topology information, values of the topology
+parameters can be specified. Machines may only support a subset of the
+parameters and different machines may have different subsets supported
+which vary depending on capacity of the corresponding CPU targets. So
+for a particular machine type board, an expected topology hierarchy can
+   

[PATCH v5 04/14] tests/unit/test-smp-parse: Add testcases for CPU clusters

2021-12-28 Thread Yanan Wang via
Add testcases for parsing of the four-level CPU topology hierarchy,
i.e. sockets/clusters/cores/threads, which will be supported on ARM
virt machines.

Signed-off-by: Yanan Wang 
---
 tests/unit/test-smp-parse.c | 130 ++--
 1 file changed, 123 insertions(+), 7 deletions(-)

diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c
index b6df8137fc..331719bbc4 100644
--- a/tests/unit/test-smp-parse.c
+++ b/tests/unit/test-smp-parse.c
@@ -61,6 +61,20 @@
 .has_maxcpus = hf, .maxcpus = f,  \
 }
 
+/*
+ * Currently a 4-level topology hierarchy is supported on ARM virt machines
+ *  -sockets/clusters/cores/threads
+ */
+#define SMP_CONFIG_WITH_CLUSTERS(ha, a, hb, b, hc, c, hd, d, he, e, hf, f) \
+{ \
+.has_cpus = ha, .cpus = a,\
+.has_sockets  = hb, .sockets  = b,\
+.has_clusters = hc, .clusters = c,\
+.has_cores= hd, .cores= d,\
+.has_threads  = he, .threads  = e,\
+.has_maxcpus  = hf, .maxcpus  = f,\
+}
+
 /**
  * @config - the given SMP configuration
  * @expect_prefer_sockets - the expected parsing result for the
@@ -290,6 +304,10 @@ static const struct SMPTestData data_generic_invalid[] = {
 /* config: -smp 2,dies=2 */
 .config = SMP_CONFIG_WITH_DIES(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0),
 .expect_error = "dies not supported by this machine's CPU topology",
+}, {
+/* config: -smp 2,clusters=2 */
+.config = SMP_CONFIG_WITH_CLUSTERS(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0),
+.expect_error = "clusters not supported by this machine's CPU 
topology",
 }, {
 /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=8 */
 .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 8),
@@ -337,20 +355,40 @@ static const struct SMPTestData data_with_dies_invalid[] 
= {
 },
 };
 
+static const struct SMPTestData data_with_clusters_invalid[] = {
+{
+/* config: -smp 16,sockets=2,clusters=2,cores=4,threads=2,maxcpus=16 */
+.config = SMP_CONFIG_WITH_CLUSTERS(T, 16, T, 2, T, 2, T, 4, T, 2, T, 
16),
+.expect_error = "Invalid CPU topology: "
+"product of the hierarchy must match maxcpus: "
+"sockets (2) * clusters (2) * cores (4) * threads (2) "
+"!= maxcpus (16)",
+}, {
+/* config: -smp 34,sockets=2,clusters=2,cores=4,threads=2,maxcpus=32 */
+.config = SMP_CONFIG_WITH_CLUSTERS(T, 34, T, 2, T, 2, T, 4, T, 2, T, 
32),
+.expect_error = "Invalid CPU topology: "
+"maxcpus must be equal to or greater than smp: "
+"sockets (2) * clusters (2) * cores (4) * threads (2) "
+"== maxcpus (32) < smp_cpus (34)",
+},
+};
+
 static char *smp_config_to_string(const SMPConfiguration *config)
 {
 return g_strdup_printf(
 "(SMPConfiguration) {\n"
-".has_cpus= %5s, cpus= %" PRId64 ",\n"
-".has_sockets = %5s, sockets = %" PRId64 ",\n"
-".has_dies= %5s, dies= %" PRId64 ",\n"
-".has_cores   = %5s, cores   = %" PRId64 ",\n"
-".has_threads = %5s, threads = %" PRId64 ",\n"
-".has_maxcpus = %5s, maxcpus = %" PRId64 ",\n"
+".has_cpus = %5s, cpus = %" PRId64 ",\n"
+".has_sockets  = %5s, sockets  = %" PRId64 ",\n"
+".has_dies = %5s, dies = %" PRId64 ",\n"
+".has_clusters = %5s, clusters = %" PRId64 ",\n"
+".has_cores= %5s, cores= %" PRId64 ",\n"
+".has_threads  = %5s, threads  = %" PRId64 ",\n"
+".has_maxcpus  = %5s, maxcpus  = %" PRId64 ",\n"
 "}",
 config->has_cpus ? "true" : "false", config->cpus,
 config->has_sockets ? "true" : "false", config->sockets,
 config->has_dies ? "true" : "false", config->dies,
+config->has_clusters ? "true" : "false", config->clusters,
 config->has_cores ? "true" : "false", config->cores,
 config->has_threads ? "true" : "false", config->threads,
 config->has_maxcpus ? "true" : "false", config->maxcpus);
@@ -363,11 +401,12 @@ static char *cpu_topology_to_string(const CpuTopology 
*topo)
 ".cpus = %u,\n"
 ".sockets  = %u,\n"
 ".dies = %u,\n"
+".clusters = %u,\n"
 ".cores= %u,\n"
 ".threads  = %u,\n"
 ".max_cpus = %u,\n"
 "}",
-topo->cpus, topo->sockets, topo->dies,
+topo->cpus, topo->sockets, topo->dies, topo->clusters,
 topo->cores, topo->threads, topo->max_cpus);
 }
 
@@ -391,6 +430,7 @@ static void check_parse(MachineState *ms, const 

[PATCH v5 02/14] hw/core/machine: Introduce CPU cluster topology support

2021-12-28 Thread Yanan Wang via
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proved to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) on both x86_64 and AArch64.

So now in Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share L2 cache
or other mid-level resources, and it is the shared resources that
is used to improve scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

In virtualization, on the hosts which have pClusters, if we design
a vCPU topology with cluster level for the guest kernel and have
dedicated vCPU pinning, a Cluster-Aware guest kernel can also make
use of the cache affinity of CPU clusters to gain similar
scheduling performance.

This patch adds infrastructure for CPU cluster level topology
configuration and parsing, so that the user can specify cluster
parameter if their machines support it.

Signed-off-by: Yanan Wang 
---
 hw/core/machine-smp.c | 26 +++---
 hw/core/machine.c |  3 +++
 include/hw/boards.h   |  6 +-
 qapi/machine.json |  5 -
 qemu-options.hx   |  7 ---
 softmmu/vl.c  |  3 +++
 6 files changed, 38 insertions(+), 12 deletions(-)

diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index 2cbfd57429..b39ed21e65 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -37,6 +37,10 @@ static char *cpu_hierarchy_to_string(MachineState *ms)
 g_string_append_printf(s, " * dies (%u)", ms->smp.dies);
 }
 
+if (mc->smp_props.clusters_supported) {
+g_string_append_printf(s, " * clusters (%u)", ms->smp.clusters);
+}
+
 g_string_append_printf(s, " * cores (%u)", ms->smp.cores);
 g_string_append_printf(s, " * threads (%u)", ms->smp.threads);
 
@@ -71,6 +75,7 @@ void machine_parse_smp_config(MachineState *ms,
 unsigned cpus= config->has_cpus ? config->cpus : 0;
 unsigned sockets = config->has_sockets ? config->sockets : 0;
 unsigned dies= config->has_dies ? config->dies : 0;
+unsigned clusters = config->has_clusters ? config->clusters : 0;
 unsigned cores   = config->has_cores ? config->cores : 0;
 unsigned threads = config->has_threads ? config->threads : 0;
 unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0;
@@ -82,6 +87,7 @@ void machine_parse_smp_config(MachineState *ms,
 if ((config->has_cpus && config->cpus == 0) ||
 (config->has_sockets && config->sockets == 0) ||
 (config->has_dies && config->dies == 0) ||
+(config->has_clusters && config->clusters == 0) ||
 (config->has_cores && config->cores == 0) ||
 (config->has_threads && config->threads == 0) ||
 (config->has_maxcpus && config->maxcpus == 0)) {
@@ -97,8 +103,13 @@ void machine_parse_smp_config(MachineState *ms,
 error_setg(errp, "dies not supported by this machine's CPU topology");
 return;
 }
+if (!mc->smp_props.clusters_supported && clusters > 1) {
+error_setg(errp, "clusters not supported by this machine's CPU 
topology");
+return;
+}
 
 dies = dies > 0 ? dies : 1;
+clusters = clusters > 0 ? clusters : 1;
 
 /* compute missing values based on the provided ones */
 if (cpus == 0 && maxcpus == 0) {
@@ -113,41 +124,42 @@ void machine_parse_smp_config(MachineState *ms,
 if (sockets == 0) {
 cores = cores > 0 ? cores : 1;
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * cores * threads);
+sockets = maxcpus / (dies * clusters * cores * threads);
 } else if (cores == 0) {
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * threads);
+cores = maxcpus / (sockets * dies * clusters * threads);
 }
 } else {
 /* prefer cores over sockets since 6.2 */
 if (cores == 0) {
 sockets = sockets > 0 ? sockets : 1;
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * threads);
+cores = maxcpus / (sockets * dies * clusters * threads);
 } else if (sockets == 0) {
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * cores * threads);
+sockets = maxcpus / (dies * 

[PATCH v5 00/14] ARM virt: Introduce CPU clusters topology support

2021-12-28 Thread Yanan Wang via
Hi,

This series introduces the new CPU clusters topology parameter
and enable the support for it on ARM virt machines.

Background and descriptions:
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proved to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) for both x86_64 and AArch64.
We can see the PR [1] or the actual patch series [2] for reference.

So since Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share L2 cache
or other mid-level resources, and it is the shared resources that
is used to improve scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

[1] 
https://lore.kernel.org/lkml/163572864855.3357115.17938524897008353101.tglx@xen13/
[2] https://lkml.org/lkml/2021/9/24/178

In virtualization, on the hosts which have pClusters, if we design
a vCPU topology with cluster level for the guest kernel and have
dedicated vCPU pinning, a Cluster-Aware guest kernel can also make
use of the cache affinity of CPU clusters to gain similar
scheduling performance. So this series introduces clusters
support in the vCPU topology on ARM virt machines.

The patches are arranged mainly in two parts:
The first part (patch 1-7):
- Implement infrastructure for CPU cluster level topology support,
  including the SMP documentation, configuration and parsing,
  adding testcases for clusters.

The second part (part 8-14):
- Enable CPU cluster support on ARM virt machines, so that users
  can specify a 4-level CPU hierarchy sockets/clusters/cores/threads.
  And the 4-level topology will be described to guest kernel through
  ACPI PPTT and DT cpu-map.

Changelog:
v3->v4:
- Significant change from v3 to v4, since the whole series is reworked
  based on latest QEMU SMP frame.
- v3: https://patchew.org/QEMU/20210516103228.37792-1-wangyana...@huawei.com/

v4->v5:
- newly added patches 4-7
- rebased on Philippe series: "tests/unit: Rework test-smp-parse tests"
  https://patchew.org/QEMU/20211216132015.815493-1-phi...@redhat.com/
- v4: https://patchew.org/QEMU/20211121122502.9844-1-wangyana...@huawei.com/

Yanan Wang (14):
  qemu-options: Improve readability of SMP related Docs
  hw/core/machine: Introduce CPU cluster topology support
  hw/core/machine: Wrap target specific parameters together
  tests/unit/test-smp-parse: Add testcases for CPU clusters
  tests/unit/test-smp-parse: No need to explicitly zero MachineClass
members
  tests/unit/test-smp-parse: Keep default MIN/MAX CPUs in
machine_base_class_init
  MAINTAINERS: Self-recommended as reviewer of "Machine core"
  hw/arm/virt: Support clusters on ARM virt machines
  hw/arm/virt: Support cluster level in DT cpu-map
  hw/acpi/aml-build: Improve scalability of PPTT generation
  hw/arm/virt-acpi-build: Make an ARM specific PPTT generator
  tests/acpi/bios-tables-test: Allow changes to virt/PPTT file
  hw/arm/virt-acpi-build: Support cluster level in PPTT generation
  tests/acpi/bios-table-test: Update expected virt/PPTT file

 MAINTAINERS |   1 +
 hw/acpi/aml-build.c |  66 +
 hw/arm/virt-acpi-build.c|  92 +++-
 hw/arm/virt.c   |  16 +++--
 hw/core/machine-smp.c   |  29 ++--
 hw/core/machine.c   |   3 +
 include/hw/acpi/aml-build.h |   5 +-
 include/hw/boards.h |   6 +-
 qapi/machine.json   |   5 +-
 qemu-options.hx |  91 ++-
 softmmu/vl.c|   3 +
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 tests/unit/test-smp-parse.c | 140 ++--
 13 files changed, 332 insertions(+), 125 deletions(-)

--
2.27.0




[PATCH for-6.2] qapi/machine.json: Fix incorrect description for die-id

2021-11-21 Thread Yanan Wang via
In terms of scope, die-id should mean "the die number within
socket the CPU belongs to" instead of "the die number within
node/board the CPU belongs to". Fix it to avoid confusing
readers of the documentation.

Fixes: 176d2cda0d ("i386/cpu: Consolidate die-id validity in smp context")
Signed-off-by: Yanan Wang 
---
 qapi/machine.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/qapi/machine.json b/qapi/machine.json
index 067e3f5378..f1839acf20 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -867,7 +867,7 @@
 #
 # @node-id: NUMA node ID the CPU belongs to
 # @socket-id: socket number within node/board the CPU belongs to
-# @die-id: die number within node/board the CPU belongs to (Since 4.1)
+# @die-id: die number within socket the CPU belongs to (since 4.1)
 # @core-id: core number within die the CPU belongs to
 # @thread-id: thread number within core the CPU belongs to
 #
-- 
2.19.1




[PATCH v4 06/10] hw/acpi/aml-build: Improve scalability of PPTT generation

2021-11-21 Thread Yanan Wang via
Currently we generate a PPTT table of n-level processor hierarchy
with n-level loops in build_pptt(). It works fine for now, as there
are only three CPU topology parameters. But the code may become less
scalable as the number of processor hierarchy levels increases.

This patch only improves the scalability of build_pptt by reducing
the loops, and intends to make no functional change.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 50 +
 1 file changed, 32 insertions(+), 18 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b3b3310df3..be3851be36 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2001,7 +2001,10 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
 const char *oem_id, const char *oem_table_id)
 {
-int pptt_start = table_data->len;
+GQueue *list = g_queue_new();
+guint pptt_start = table_data->len;
+guint father_offset;
+guint length, i;
 int uid = 0;
 int socket;
 AcpiTable table = { .sig = "PPTT", .rev = 2,
@@ -2010,9 +2013,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
 acpi_table_begin(, table_data);
 
 for (socket = 0; socket < ms->smp.sockets; socket++) {
-uint32_t socket_offset = table_data->len - pptt_start;
-int core;
-
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 /*
@@ -2021,35 +2023,47 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  */
 (1 << 0),
 0, socket, NULL, 0);
+}
 
-for (core = 0; core < ms->smp.cores; core++) {
-uint32_t core_offset = table_data->len - pptt_start;
-int thread;
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int core;
 
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (core = 0; core < ms->smp.cores; core++) {
 if (ms->smp.threads > 1) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
 build_processor_hierarchy_node(
 table_data,
 (0 << 0), /* not a physical package */
-socket_offset, core, NULL, 0);
-
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-core_offset, uid++, NULL, 0);
-}
+father_offset, core, NULL, 0);
 } else {
 build_processor_hierarchy_node(
 table_data,
 (1 << 1) | /* ACPI Processor ID valid */
 (1 << 3),  /* Node is a Leaf */
-socket_offset, uid++, NULL, 0);
+father_offset, uid++, NULL, 0);
 }
 }
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int thread;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (thread = 0; thread < ms->smp.threads; thread++) {
+build_processor_hierarchy_node(
+table_data,
+(1 << 1) | /* ACPI Processor ID valid */
+(1 << 2) | /* Processor is a Thread */
+(1 << 3),  /* Node is a Leaf */
+father_offset, uid++, NULL, 0);
+}
+}
+
+g_queue_free(list);
 acpi_table_end(linker, );
 }
 
-- 
2.19.1




[PATCH v4 07/10] hw/arm/virt-acpi-build: Make an ARM specific PPTT generator

2021-11-21 Thread Yanan Wang via
We have a generic build_pptt() in hw/acpi/aml-build.c but it's
currently only used in ARM acpi initialization. Now we are going
to support the new CPU cluster parameter which is currently only
supported by ARM, it won't be a very good idea to add it to the
generic build_pptt() as it will make the code complex and hard
to maintain especially when we also support CPU cache topology
hierarchy in build_pptt() too. Note that the cache topology
design also varies between different CPU targets.

So an ARM specific PPTT generator becomes necessary now. Given
that the generic one is currently only used by ARM, let's just
move build_pptt() from aml-build.c to virt-acpi-build.c with
minor update.

Signed-off-by: Yanan Wang 
---
 hw/acpi/aml-build.c | 80 ++---
 hw/arm/virt-acpi-build.c| 77 ++-
 include/hw/acpi/aml-build.h |  5 ++-
 3 files changed, 81 insertions(+), 81 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index be3851be36..040fbc9b4b 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -1968,10 +1968,9 @@ void build_slit(GArray *table_data, BIOSLinker *linker, 
MachineState *ms,
  * ACPI spec, Revision 6.3
  * 5.2.29.1 Processor hierarchy node structure (Type 0)
  */
-static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
-   uint32_t parent, uint32_t id,
-   uint32_t *priv_rsrc,
-   uint32_t priv_num)
+void build_processor_hierarchy_node(GArray *tbl, uint32_t flags,
+uint32_t parent, uint32_t id,
+uint32_t *priv_rsrc, uint32_t priv_num)
 {
 int i;
 
@@ -1994,79 +1993,6 @@ static void build_processor_hierarchy_node(GArray *tbl, 
uint32_t flags,
 }
 }
 
-/*
- * ACPI spec, Revision 6.3
- * 5.2.29 Processor Properties Topology Table (PPTT)
- */
-void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
-const char *oem_id, const char *oem_table_id)
-{
-GQueue *list = g_queue_new();
-guint pptt_start = table_data->len;
-guint father_offset;
-guint length, i;
-int uid = 0;
-int socket;
-AcpiTable table = { .sig = "PPTT", .rev = 2,
-.oem_id = oem_id, .oem_table_id = oem_table_id };
-
-acpi_table_begin(, table_data);
-
-for (socket = 0; socket < ms->smp.sockets; socket++) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-/*
- * Physical package - represents the boundary
- * of a physical package
- */
-(1 << 0),
-0, socket, NULL, 0);
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int core;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (core = 0; core < ms->smp.cores; core++) {
-if (ms->smp.threads > 1) {
-g_queue_push_tail(list,
-GUINT_TO_POINTER(table_data->len - pptt_start));
-build_processor_hierarchy_node(
-table_data,
-(0 << 0), /* not a physical package */
-father_offset, core, NULL, 0);
-} else {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-}
-
-length = g_queue_get_length(list);
-for (i = 0; i < length; i++) {
-int thread;
-
-father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-for (thread = 0; thread < ms->smp.threads; thread++) {
-build_processor_hierarchy_node(
-table_data,
-(1 << 1) | /* ACPI Processor ID valid */
-(1 << 2) | /* Processor is a Thread */
-(1 << 3),  /* Node is a Leaf */
-father_offset, uid++, NULL, 0);
-}
-}
-
-g_queue_free(list);
-acpi_table_end(linker, );
-}
-
 /* build rev1/rev3/rev5.1 FADT */
 void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f,
 const char *oem_id, const char *oem_table_id)
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 674f902652..bef7056213 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -807,6 +807,80 @@ build_madt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 acpi_table_end(linker, );
 }
 
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29 Processor Properties Topology Table (PPTT)
+ */
+static void
+build_pptt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
+{
+

[PATCH v4 08/10] tests/acpi/bios-tables-test: Allow changes to virt/PPTT file

2021-11-21 Thread Yanan Wang via
List test/data/acpi/virt/PPTT as the expected files allowed to
be changed in tests/qtest/bios-tables-test-allowed-diff.h

Signed-off-by: Yanan Wang 
---
 tests/qtest/bios-tables-test-allowed-diff.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/qtest/bios-tables-test-allowed-diff.h 
b/tests/qtest/bios-tables-test-allowed-diff.h
index dfb8523c8b..cb143a55a6 100644
--- a/tests/qtest/bios-tables-test-allowed-diff.h
+++ b/tests/qtest/bios-tables-test-allowed-diff.h
@@ -1 +1,2 @@
 /* List of comma-separated changed AML files to ignore */
+"tests/data/acpi/virt/PPTT",
-- 
2.19.1




[PATCH v4 01/10] qemu-options: Improve readability of SMP related Docs

2021-11-21 Thread Yanan Wang via
We have a description in qemu-options.hx for each CPU topology
parameter to explain what it exactly means, and also an extra
declaration for the target-specific one, e.g. "for PC only"
when describing "dies", and "for PC, it's on one die" when
describing "cores".

Now we are going to introduce one more non-generic parameter
"clusters", which will make the Doc less readable and scalable
if we still continue to use the legacy way to describe it.

So let's first make two tweaks to the Docs to improve the
readability and also the scalability:
1) In the -help text: Delete the extra specific declaration and
   describe each topology parameter level by level. Then add a
   note to declare that different machines may support different
   subsets and the actual meaning of the supported parameters
   will vary accordingly.
2) In the rST text: List all the sub-hierarchies currently
   supported in QEMU, and correspondingly give an example of
   -smp configuration for each of them.

Signed-off-by: Yanan Wang 
---
 qemu-options.hx | 76 ++---
 1 file changed, 59 insertions(+), 17 deletions(-)

diff --git a/qemu-options.hx b/qemu-options.hx
index ae2c6dbbfc..7a59db7764 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -207,14 +207,26 @@ ERST
 
 DEF("smp", HAS_ARG, QEMU_OPTION_smp,
 "-smp 
[[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n"
-"set the number of CPUs to 'n' [default=1]\n"
+"set the number of initial CPUs to 'n' [default=1]\n"
 "maxcpus= maximum number of total CPUs, including\n"
 "offline CPUs for hotplug, etc\n"
-"sockets= number of discrete sockets in the system\n"
-"dies= number of CPU dies on one socket (for PC only)\n"
-"cores= number of CPU cores on one socket (for PC, it's on 
one die)\n"
-"threads= number of threads on one CPU core\n",
-QEMU_ARCH_ALL)
+"sockets= number of sockets on the machine board\n"
+"dies= number of dies in one socket\n"
+"cores= number of cores in one die\n"
+"threads= number of threads in one core\n"
+"Note: Different machines may have different subsets of the CPU topology\n"
+"  parameters supported, so the actual meaning of the supported 
parameters\n"
+"  will vary accordingly. For example, for a machine type that 
supports a\n"
+"  three-level CPU hierarchy of sockets/cores/threads, the parameters 
will\n"
+"  sequentially mean as below:\n"
+"sockets means the number of sockets on the machine 
board\n"
+"cores means the number of cores in one socket\n"
+"threads means the number of threads in one core\n"
+"  For a particular machine type board, an expected CPU topology 
hierarchy\n"
+"  can be defined through the supported sub-option. Unsupported 
parameters\n"
+"  can also be provided in addition to the sub-option, but their 
values\n"
+"  must be set as 1 in the purpose of correct parsing.\n",
+QEMU_ARCH_ALL)
 SRST
 ``-smp 
[[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]``
 Simulate a SMP system with '\ ``n``\ ' CPUs initially present on
@@ -225,27 +237,57 @@ SRST
 initial CPU count will match the maximum number. When only one of them
 is given then the omitted one will be set to its counterpart's value.
 Both parameters may be specified, but the maximum number of CPUs must
-be equal to or greater than the initial CPU count. Both parameters are
-subject to an upper limit that is determined by the specific machine
-type chosen.
-
-To control reporting of CPU topology information, the number of sockets,
-dies per socket, cores per die, and threads per core can be specified.
-The sum `` sockets * cores * dies * threads `` must be equal to the
-maximum CPU count. CPU targets may only support a subset of the topology
-parameters. Where a CPU target does not support use of a particular
-topology parameter, its value should be assumed to be 1 for the purpose
-of computing the CPU maximum count.
+be equal to or greater than the initial CPU count. Product of the
+CPU topology hierarchy must be equal to the maximum number of CPUs.
+Both parameters are subject to an upper limit that is determined by
+the specific machine type chosen.
+
+To control reporting of CPU topology information, values of the topology
+parameters can be specified. Machines may only support a subset of the
+parameters and different machines may have different subsets supported
+which vary depending on capacity of the corresponding CPU targets. So
+for a particular machine type board, an expected topology hierarchy can
+   

[PATCH v4 10/10] tests/acpi/bios-table-test: Update expected virt/PPTT file

2021-11-21 Thread Yanan Wang via
Run ./tests/data/acpi/rebuild-expected-aml.sh from build directory
to update PPTT binary. Also empty bios-tables-test-allowed-diff.h.

The disassembled differences between actual and expected PPTT:

 /*
  * Intel ACPI Component Architecture
  * AML/ASL+ Disassembler version 20180810 (64-bit version)
  * Copyright (c) 2000 - 2018 Intel Corporation
  *
- * Disassembly of tests/data/acpi/virt/PPTT, Mon Oct 25 20:24:53 2021
+ * Disassembly of /tmp/aml-BPI5B1, Mon Oct 25 20:24:53 2021
  *
  * ACPI Data Table [PPTT]
  *
  * Format: [HexOffset DecimalOffset ByteLength]  FieldName : FieldValue
  */

 [000h    4]Signature : "PPTT"[Processor Properties 
Topology Table]
-[004h 0004   4] Table Length : 004C
+[004h 0004   4] Table Length : 0060
 [008h 0008   1] Revision : 02
-[009h 0009   1] Checksum : A8
+[009h 0009   1] Checksum : 48
 [00Ah 0010   6]   Oem ID : "BOCHS "
 [010h 0016   8] Oem Table ID : "BXPC"
 [018h 0024   4] Oem Revision : 0001
 [01Ch 0028   4]  Asl Compiler ID : "BXPC"
 [020h 0032   4]Asl Compiler Revision : 0001

 [024h 0036   1]Subtable Type : 00 [Processor Hierarchy Node]
 [025h 0037   1]   Length : 14
 [026h 0038   2] Reserved : 
 [028h 0040   4]Flags (decoded below) : 0001
 Physical package : 1
  ACPI Processor ID valid : 0
 [02Ch 0044   4]   Parent : 
 [030h 0048   4]ACPI Processor ID : 
 [034h 0052   4]  Private Resource Number : 

 [038h 0056   1]Subtable Type : 00 [Processor Hierarchy Node]
 [039h 0057   1]   Length : 14
 [03Ah 0058   2] Reserved : 
-[03Ch 0060   4]Flags (decoded below) : 000A
+[03Ch 0060   4]Flags (decoded below) : 
 Physical package : 0
- ACPI Processor ID valid : 1
+ ACPI Processor ID valid : 0
 [040h 0064   4]   Parent : 0024
 [044h 0068   4]ACPI Processor ID : 
 [048h 0072   4]  Private Resource Number : 

-Raw Table Data: Length 76 (0x4C)
+[04Ch 0076   1]Subtable Type : 00 [Processor Hierarchy Node]
+[04Dh 0077   1]   Length : 14
+[04Eh 0078   2] Reserved : 
+[050h 0080   4]Flags (decoded below) : 000A
+Physical package : 0
+ ACPI Processor ID valid : 1
+[054h 0084   4]   Parent : 0038
+[058h 0088   4]ACPI Processor ID : 
+[05Ch 0092   4]  Private Resource Number : 
+
+Raw Table Data: Length 96 (0x60)

-: 50 50 54 54 4C 00 00 00 02 A8 42 4F 43 48 53 20  // PPTTL.BOCHS
+: 50 50 54 54 60 00 00 00 02 48 42 4F 43 48 53 20  // PPTT`HBOCHS
 0010: 42 58 50 43 20 20 20 20 01 00 00 00 42 58 50 43  // BXPCBXPC
 0020: 01 00 00 00 00 14 00 00 01 00 00 00 00 00 00 00  // 
-0030: 00 00 00 00 00 00 00 00 00 14 00 00 0A 00 00 00  // 
-0040: 24 00 00 00 00 00 00 00 00 00 00 00  // $...
+0030: 00 00 00 00 00 00 00 00 00 14 00 00 00 00 00 00  // 
+0040: 24 00 00 00 00 00 00 00 00 00 00 00 00 14 00 00  // $...
+0050: 0A 00 00 00 38 00 00 00 00 00 00 00 00 00 00 00  // 8...

Signed-off-by: Yanan Wang 
---
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 tests/qtest/bios-tables-test-allowed-diff.h |   1 -
 2 files changed, 1 deletion(-)

diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT
index 
7a1258ecf123555b24462c98ccbb76b4ac1d0c2b..f56ea63b369a604877374ad696c396e796ab1c83
 100644
GIT binary patch
delta 53
zcmV-50LuSNU

[PATCH v4 03/10] hw/core/machine: Wrap target specific parameters together

2021-11-21 Thread Yanan Wang via
Wrap the CPU target specific parameters together into a single
variable, so that only a single line needs to be updated, rather
than several, when new topology parameters are introduced.

No functional change intended.

Signed-off-by: Yanan Wang 
---
 hw/core/machine-smp.c | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index 87ceb45470..2a3f16e52b 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -77,6 +77,7 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 unsigned cores   = config->has_cores ? config->cores : 0;
 unsigned threads = config->has_threads ? config->threads : 0;
 unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0;
+unsigned others;
 
 /*
  * Specified CPU topology parameters must be greater than zero,
@@ -109,6 +110,8 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 dies = dies > 0 ? dies : 1;
 clusters = clusters > 0 ? clusters : 1;
 
+others = dies * clusters;
+
 /* compute missing values based on the provided ones */
 if (cpus == 0 && maxcpus == 0) {
 sockets = sockets > 0 ? sockets : 1;
@@ -122,30 +125,30 @@ void smp_parse(MachineState *ms, SMPConfiguration 
*config, Error **errp)
 if (sockets == 0) {
 cores = cores > 0 ? cores : 1;
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * clusters * cores * threads);
+sockets = maxcpus / (cores * threads * others);
 } else if (cores == 0) {
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * clusters * threads);
+cores = maxcpus / (sockets * threads * others);
 }
 } else {
 /* prefer cores over sockets since 6.2 */
 if (cores == 0) {
 sockets = sockets > 0 ? sockets : 1;
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * clusters * threads);
+cores = maxcpus / (sockets * threads * others);
 } else if (sockets == 0) {
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * clusters * cores * threads);
+sockets = maxcpus / (cores * threads * others);
 }
 }
 
 /* try to calculate omitted threads at last */
 if (threads == 0) {
-threads = maxcpus / (sockets * dies * clusters * cores);
+threads = maxcpus / (sockets * cores * others);
 }
 }
 
-maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * clusters * cores * 
threads;
+maxcpus = maxcpus > 0 ? maxcpus : sockets * cores * threads * others;
 cpus = cpus > 0 ? cpus : maxcpus;
 
 ms->smp.cpus = cpus;
@@ -157,7 +160,7 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 ms->smp.max_cpus = maxcpus;
 
 /* sanity-check of the computed topology */
-if (sockets * dies * clusters * cores * threads != maxcpus) {
+if (sockets * cores * threads * others != maxcpus) {
 g_autofree char *topo_msg = cpu_hierarchy_to_string(ms);
 error_setg(errp, "Invalid CPU topology: "
"product of the hierarchy must match maxcpus: "
-- 
2.19.1




[PATCH v4 02/10] hw/core/machine: Introduce CPU cluster topology support

2021-11-21 Thread Yanan Wang via
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proven to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) on both x86_64 and AArch64.

So now in Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share an L2 cache
or other mid-level resources, and it is these shared resources that
are used to improve the scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

In virtualization, on hosts which have physical clusters (pClusters),
if we design a vCPU topology with a cluster level for the guest
kernel and use dedicated vCPU pinning, a Cluster-Aware guest kernel
can also make use of the cache affinity of CPU clusters to gain
similar scheduling performance.

This patch adds infrastructure for CPU cluster level topology
configuration and parsing, so that the user can specify cluster
parameter if their machines support it.

Signed-off-by: Yanan Wang 
---
 hw/core/machine-smp.c | 26 +++---
 hw/core/machine.c |  3 +++
 include/hw/boards.h   |  6 +-
 qapi/machine.json |  5 -
 qemu-options.hx   |  7 ---
 softmmu/vl.c  |  3 +++
 6 files changed, 38 insertions(+), 12 deletions(-)

diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c
index 116a0cbbfa..87ceb45470 100644
--- a/hw/core/machine-smp.c
+++ b/hw/core/machine-smp.c
@@ -37,6 +37,10 @@ static char *cpu_hierarchy_to_string(MachineState *ms)
 g_string_append_printf(s, " * dies (%u)", ms->smp.dies);
 }
 
+if (mc->smp_props.clusters_supported) {
+g_string_append_printf(s, " * clusters (%u)", ms->smp.clusters);
+}
+
 g_string_append_printf(s, " * cores (%u)", ms->smp.cores);
 g_string_append_printf(s, " * threads (%u)", ms->smp.threads);
 
@@ -69,6 +73,7 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 unsigned cpus= config->has_cpus ? config->cpus : 0;
 unsigned sockets = config->has_sockets ? config->sockets : 0;
 unsigned dies= config->has_dies ? config->dies : 0;
+unsigned clusters = config->has_clusters ? config->clusters : 0;
 unsigned cores   = config->has_cores ? config->cores : 0;
 unsigned threads = config->has_threads ? config->threads : 0;
 unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0;
@@ -80,6 +85,7 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 if ((config->has_cpus && config->cpus == 0) ||
 (config->has_sockets && config->sockets == 0) ||
 (config->has_dies && config->dies == 0) ||
+(config->has_clusters && config->clusters == 0) ||
 (config->has_cores && config->cores == 0) ||
 (config->has_threads && config->threads == 0) ||
 (config->has_maxcpus && config->maxcpus == 0)) {
@@ -95,8 +101,13 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, 
Error **errp)
 error_setg(errp, "dies not supported by this machine's CPU topology");
 return;
 }
+if (!mc->smp_props.clusters_supported && clusters > 1) {
+error_setg(errp, "clusters not supported by this machine's CPU 
topology");
+return;
+}
 
 dies = dies > 0 ? dies : 1;
+clusters = clusters > 0 ? clusters : 1;
 
 /* compute missing values based on the provided ones */
 if (cpus == 0 && maxcpus == 0) {
@@ -111,41 +122,42 @@ void smp_parse(MachineState *ms, SMPConfiguration 
*config, Error **errp)
 if (sockets == 0) {
 cores = cores > 0 ? cores : 1;
 threads = threads > 0 ? threads : 1;
-sockets = maxcpus / (dies * cores * threads);
+sockets = maxcpus / (dies * clusters * cores * threads);
 } else if (cores == 0) {
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * threads);
+cores = maxcpus / (sockets * dies * clusters * threads);
 }
 } else {
 /* prefer cores over sockets since 6.2 */
 if (cores == 0) {
 sockets = sockets > 0 ? sockets : 1;
 threads = threads > 0 ? threads : 1;
-cores = maxcpus / (sockets * dies * threads);
+cores = maxcpus / (sockets * dies * clusters * threads);
 } else if (sockets == 0) {
 threads = threads > 0 ? threads : 1;
- 

[PATCH v4 00/10] ARM virt: Introduce CPU clusters topology support

2021-11-21 Thread Yanan Wang via
Hi,

This series introduces the new CPU clusters topology parameter
and enable the support for it on ARM virt machines.

Background and descriptions:
The new Cluster-Aware Scheduling support has landed in Linux 5.16,
which has been proven to benefit the scheduling performance (e.g.
load balance and wake_affine strategy) on both x86_64 and AArch64.
We can see Kernel PR [1] and the latest patch set [2] for reference.

So now in Linux 5.16 we have four-level arch-neutral CPU topology
definition like below and a new scheduler level for clusters.
struct cpu_topology {
int thread_id;
int core_id;
int cluster_id;
int package_id;
int llc_id;
cpumask_t thread_sibling;
cpumask_t core_sibling;
cpumask_t cluster_sibling;
cpumask_t llc_sibling;
}

A cluster generally means a group of CPU cores which share an L2 cache
or other mid-level resources, and it is these shared resources that
are used to improve the scheduler's behavior. From the point of view of
the size range, it's between CPU die and CPU core. For example, on
some ARM64 Kunpeng servers, we have 6 clusters in each NUMA node,
and 4 CPU cores in each cluster. The 4 CPU cores share a separate
L2 cache and a L3 cache tag, which brings cache affinity advantage.

[1] 
https://lore.kernel.org/lkml/163572864855.3357115.17938524897008353101.tglx@xen13/
[2] https://lkml.org/lkml/2021/9/24/178

In virtualization, on hosts which have physical clusters (pClusters),
if we design a vCPU topology with a cluster level for the guest
kernel and use dedicated vCPU pinning, a Cluster-Aware guest kernel
can also make use of the cache affinity of CPU clusters to gain
similar scheduling performance.

This series consists of two parts:
The first part (patch 1-3):
Implement infrastructure for CPU cluster level topology support,
including the SMP documentation, configuration and parsing.

The second part (part 4-10):
Enable CPU cluster support on ARM virt machines, so that users
can specify a 4-level CPU hierarchy sockets/clusters/cores/threads.
And the 4-level topology will be described to guest kernel through
ACPI PPTT and DT cpu-map.

Changelog:
v3->v4:
- Significant change from v3 to v4, since the whole series is reworked
  based on latest QEMU SMP frame.
- v3: 
https://lore.kernel.org/qemu-devel/20210516103228.37792-1-wangyana...@huawei.com/

Yanan Wang (10):
  qemu-options: Improve readability of SMP related Docs
  hw/core/machine: Introduce CPU cluster topology support
  hw/core/machine: Wrap target specific parameters together
  hw/arm/virt: Support clusters on ARM virt machines
  hw/arm/virt: Support cluster level in DT cpu-map
  hw/acpi/aml-build: Improve scalability of PPTT generation
  hw/arm/virt-acpi-build: Make an ARM specific PPTT generator
  tests/acpi/bios-tables-test: Allow changes to virt/PPTT file
  hw/acpi/virt-acpi-build: Support cluster level in PPTT generation
  tests/acpi/bios-table-test: Update expected virt/PPTT file

 hw/acpi/aml-build.c |  66 ++
 hw/arm/virt-acpi-build.c|  92 +++-
 hw/arm/virt.c   |  16 ---
 hw/core/machine-smp.c   |  29 +---
 hw/core/machine.c   |   3 ++
 include/hw/acpi/aml-build.h |   5 +-
 include/hw/boards.h |   6 ++-
 qapi/machine.json   |   5 +-
 qemu-options.hx |  91 +++
 softmmu/vl.c|   3 ++
 tests/data/acpi/virt/PPTT   | Bin 76 -> 96 bytes
 11 files changed, 214 insertions(+), 102 deletions(-)

--
2.19.1




[PATCH v4 05/10] hw/arm/virt: Support cluster level in DT cpu-map

2021-11-21 Thread Yanan Wang via
Support one cluster level between core and physical package in the
cpu-map of Arm/virt devicetree. This is also consistent with Linux
Doc "Documentation/devicetree/bindings/cpu/cpu-topology.txt".

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c | 15 ---
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index b2129f7ccd..dfdc64c4e3 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -430,9 +430,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
  * can contain several layers of clustering within a single physical
  * package and cluster nodes can be contained in parent cluster nodes.
  *
- * Given that cluster is not yet supported in the vCPU topology,
- * we currently generate one cluster node within each socket node
- * by default.
+ * Note: currently we only support one layer of clustering within
+ * each physical package.
  */
 qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map");
 
@@ -442,14 +441,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 
 if (ms->smp.threads > 1) {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d/thread%d",
-cpu / (ms->smp.cores * ms->smp.threads),
+"/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d",
+cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads),
+(cpu / (ms->smp.cores * ms->smp.threads)) % 
ms->smp.clusters,
 (cpu / ms->smp.threads) % ms->smp.cores,
 cpu % ms->smp.threads);
 } else {
 map_path = g_strdup_printf(
-"/cpus/cpu-map/socket%d/cluster0/core%d",
-cpu / ms->smp.cores,
+"/cpus/cpu-map/socket%d/cluster%d/core%d",
+cpu / (ms->smp.clusters * ms->smp.cores),
+(cpu / ms->smp.cores) % ms->smp.clusters,
 cpu % ms->smp.cores);
 }
 qemu_fdt_add_path(ms->fdt, map_path);
-- 
2.19.1




[PATCH v4 09/10] hw/acpi/virt-acpi-build: Support cluster level in PPTT generation

2021-11-21 Thread Yanan Wang via
Support cluster level in generation of ACPI Processor Properties
Topology Table (PPTT) for ARM virt machines.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt-acpi-build.c | 15 +++
 1 file changed, 15 insertions(+)

diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index bef7056213..b34f0dbee0 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -839,6 +839,21 @@ build_pptt(GArray *table_data, BIOSLinker *linker, 
VirtMachineState *vms)
 0, socket, NULL, 0);
 }
 
+length = g_queue_get_length(list);
+for (i = 0; i < length; i++) {
+int cluster;
+
+father_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
+for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+g_queue_push_tail(list,
+GUINT_TO_POINTER(table_data->len - pptt_start));
+build_processor_hierarchy_node(
+table_data,
+(0 << 0), /* not a physical package */
+father_offset, cluster, NULL, 0);
+}
+}
+
 length = g_queue_get_length(list);
 for (i = 0; i < length; i++) {
 int core;
-- 
2.19.1




[PATCH v4 04/10] hw/arm/virt: Support clusters on ARM virt machines

2021-11-21 Thread Yanan Wang via
In implementations of ARM64 architecture, at most there could be
a CPU topology hierarchy like "sockets/dies/clusters/cores/threads"
defined. For example, some ARM64 server chip Kunpeng 920 totally
has 2 sockets, 2 NUMA nodes (also represent CPU dies range) in each
socket, 6 clusters in each NUMA node, 4 CPU cores in each cluster.

Clusters within the same NUMA node share the L3 cache data, and cores
within the same cluster share an L2 cache and an L3 cache tag.
Given that designing a vCPU topology with cluster level for the
guest can gain scheduling performance improvement, let's support
this new parameter on ARM virt machines.

After this, we can define a 4-level CPU topology hierarchy like:
cpus=*,maxcpus=*,sockets=*,clusters=*,cores=*,threads=*.

Signed-off-by: Yanan Wang 
---
 hw/arm/virt.c   |  1 +
 qemu-options.hx | 10 ++
 2 files changed, 11 insertions(+)

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 369552ad45..b2129f7ccd 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2698,6 +2698,7 @@ static void virt_machine_class_init(ObjectClass *oc, void 
*data)
 hc->unplug_request = virt_machine_device_unplug_request_cb;
 hc->unplug = virt_machine_device_unplug_cb;
 mc->nvdimm_supported = true;
+mc->smp_props.clusters_supported = true;
 mc->auto_enable_numa_with_memhp = true;
 mc->auto_enable_numa_with_memdev = true;
 mc->default_ram_id = "mach-virt.ram";
diff --git a/qemu-options.hx b/qemu-options.hx
index 0f26f7dad7..74d335e4c3 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -277,6 +277,16 @@ SRST
 
 -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16
 
+The following sub-option defines a CPU topology hierarchy (2 sockets
+totally on the machine, 2 clusters per socket, 2 cores per cluster,
+2 threads per core) for ARM virt machines which support sockets/clusters
+/cores/threads. Some members of the option can be omitted but their values
+will be automatically computed:
+
+::
+
+-smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16
+
 Historically preference was given to the coarsest topology parameters
 when computing missing values (ie sockets preferred over cores, which
 were preferred over threads), however, this behaviour is considered
-- 
2.19.1