Hi Alireza,

On 2/20/26 11:11, Alireza Sanaee wrote:
Add cache topology to PPTT table.

Signed-off-by: Alireza Sanaee <[email protected]>
---
  hw/acpi/aml-build.c      | 198 +++++++++++++++++++++++++++++++++++++--
  hw/arm/virt-acpi-build.c |   8 +-
  include/hw/acpi/cpu.h    |  10 ++
  3 files changed, 207 insertions(+), 9 deletions(-)

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index b75aa2336e..a8de45036c 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -32,6 +32,7 @@
  #include "hw/pci/pci_bus.h"
  #include "hw/pci/pci_bridge.h"
  #include "qemu/cutils.h"
+#include "hw/core/cpu.h"

  static GArray *build_alloc_array(void)
  {
@@ -2148,6 +2149,102 @@ void build_spcr(GArray *table_data, BIOSLinker *linker,
      }
      acpi_table_end(linker, &table);
  }
+
+static void build_cache_nodes(GArray *tbl, CPUCoreCaches *cache,
+                              uint32_t next_offset)
+{
+    const uint8_t node_length = 24;
+    int start_len = tbl->len;
+    int val;
+
+    build_append_byte(tbl, 1); /* Type 1 - cache */
+    build_append_byte(tbl, node_length); /* Length */
+    build_append_int_noprefix(tbl, 0, 2); /* Reserved */
+    build_append_int_noprefix(tbl, 0x7f, 4); /* Flags */
+    build_append_int_noprefix(tbl, next_offset, 4); /* Offset of next cache up */
+    build_append_int_noprefix(tbl, cache->size, 4); /* Size */
+    build_append_int_noprefix(tbl, cache->sets, 4); /* Number of sets */
+    build_append_byte(tbl, cache->associativity); /* Associativity */
+    val = 0x3;
+    switch (cache->type) {
+    case INSTRUCTION_CACHE:
+        val |= (1 << 2); /* Instruction Cache */
+        break;
+    case DATA_CACHE:
+        val |= (0 << 2); /* Data Cache */
+        break;
+    case UNIFIED_CACHE:
+        val |= (3 << 2); /* Unified */
+        break;
+    }
+    build_append_byte(tbl, val); /* Attributes */
+    build_append_int_noprefix(tbl, cache->linesize, 2); /* Line size */
+    g_assert(tbl->len == start_len + node_length);
+}

Please document the ACPI spec version on top of build_cache_nodes() and
align the comments on the fields. See build_processor_hierarchy_node()
as an example.
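For instance, something along these lines (just a sketch; please
double-check the section number against the 6.3 spec):

/*
 * ACPI spec, Revision 6.3
 * 5.2.29.2 Cache Type Structure (Type 1)
 */
static void build_cache_nodes(GArray *tbl, CPUCoreCaches *cache,
                              uint32_t next_offset)

with the field comments aligned like:

    build_append_byte(tbl, 1);                /* Type 1 - cache */
    build_append_byte(tbl, node_length);      /* Length */
    build_append_int_noprefix(tbl, 0, 2);     /* Reserved */
    build_append_int_noprefix(tbl, 0x7f, 4);  /* Flags */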

I thought "Cache ID" field was missing and length was wrong (28), but
then I realized that it optional/non-existent on older spec versions.
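
(If the PPTT revision is ever bumped to 3, i.e. ACPI 6.4+, I believe the
node would grow to 28 bytes, with the Cache ID appended at the end and
the corresponding flag bit set, roughly:

    build_append_int_noprefix(tbl, cache_id, 4); /* Cache ID */

where 'cache_id' is a hypothetical per-cache identifier. But that's out
of scope for this patch.)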


+/*
+ * builds caches from the top level (`level_high` parameter) to the bottom
+ * level (`level_low` parameter).  It searches for caches found in
+ * systems' registers, and fills up the table. Then it updates the
+ * `data_offset` and `instr_offset` parameters with the offset of the data
+ * and instruction caches of the lowest level, respectively.
+ */

Please start the comment with an upper-case letter. More importantly, I
don't think this comment is very helpful as it stands: I'd like to read
more about the meaning of 'data_offset' and 'instr_offset', and why they
are needed, in the context of the ACPI table.
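
For example, something along these lines would already help (my wording,
based on how the offsets are used below; feel free to adjust):

/*
 * Build the Cache Type Structures for the cache levels between
 * `level_high` (inclusive) and `level_low` (inclusive), walking from
 * the top level down so that each node can reference the next level
 * up via its 'Next Level of Cache' offset.  On return, `*data_offset`
 * and `*instr_offset` hold the offsets, relative to the start of the
 * PPTT, of the lowest-level data and instruction cache nodes, which
 * the caller then puts in the Private Resources list of the processor
 * hierarchy node owning these caches.
 */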


+static bool build_caches(GArray *table_data, uint32_t pptt_start,
+                         int num_caches, CPUCoreCaches *caches,
+                         uint8_t level_high, /* Inclusive */
+                         uint8_t level_low,  /* Inclusive */
+                         uint32_t *data_offset,
+                         uint32_t *instr_offset)
+{
+    uint32_t next_level_offset_data = 0, next_level_offset_instruction = 0;
+    uint32_t this_offset, next_offset = 0;
+    int c, level;
+    bool found_cache = false;
+
+    /* Walk caches from top to bottom */
+    for (level = level_high; level >= level_low; level--) {
+        for (c = 0; c < num_caches; c++) {
+            if (caches[c].level != level) {
+                continue;
+            }
+
+            /* Assume only unified above l1 for now */
+            this_offset = table_data->len - pptt_start;

Is 'this_offset' an invariant that can be hoisted out of the loops?


+            switch (caches[c].type) {
+            case INSTRUCTION_CACHE:
+                next_offset = next_level_offset_instruction;
+                break;
+            case DATA_CACHE:
+                next_offset = next_level_offset_data;
+                break;
+            case UNIFIED_CACHE:
+                /* Either is fine here */
+                next_offset = next_level_offset_instruction;
+                break;
+            }
+            build_cache_nodes(table_data, &caches[c], next_offset);
+            switch (caches[c].type) {
+            case INSTRUCTION_CACHE:
+                next_level_offset_instruction = this_offset;
+                break;
+            case DATA_CACHE:
+                next_level_offset_data = this_offset;
+                break;
+            case UNIFIED_CACHE:
+                next_level_offset_instruction = this_offset;
+                next_level_offset_data = this_offset;
+                break;
+            }
+            *data_offset = next_level_offset_data;
+            *instr_offset = next_level_offset_instruction;
+
+            found_cache = true;
+        }
+    }
+
+    return found_cache;
+}
+
  /*
   * ACPI spec, Revision 6.3
   * 5.2.29 Processor Properties Topology Table (PPTT)
@@ -2158,11 +2255,32 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
  {
      MachineClass *mc = MACHINE_GET_CLASS(ms);
      CPUArchIdList *cpus = ms->possible_cpus;
-    int64_t socket_id = -1, cluster_id = -1, core_id = -1;
-    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
+    uint32_t core_data_offset = 0;
+    uint32_t core_instr_offset = 0;
+    uint32_t cluster_instr_offset = 0;
+    uint32_t cluster_data_offset = 0;
+    uint32_t node_data_offset = 0;
+    uint32_t node_instr_offset = 0;
+    int top_node = 3;
+    int top_cluster = 3;
+    int top_core = 3;
+    int bottom_node = 3;
+    int bottom_cluster = 3;
+    int bottom_core = 3;
+    int64_t socket_id = -1;
+    int64_t cluster_id = -1;
+    int64_t core_id = -1;
+    uint32_t socket_offset = 0;
+    uint32_t cluster_offset = 0;
+    uint32_t core_offset = 0;
      uint32_t pptt_start = table_data->len;
      uint32_t root_offset;
      int n;
+    uint32_t priv_rsrc[2];
+    uint32_t num_priv = 0;
+    bool cache_available;
+    bool llevel;
+
      AcpiTable table = { .sig = "PPTT", .rev = 2,
                          .oem_id = oem_id, .oem_table_id = oem_table_id };
@@ -2192,11 +2310,30 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
              socket_id = cpus->cpus[n].props.socket_id;
              cluster_id = -1;
              core_id = -1;
+            bottom_node = top_node;
+            num_priv = 0;
+            cache_available = machine_defines_cache_at_topo_level(
+                ms, CPU_TOPOLOGY_LEVEL_SOCKET);
+            llevel = machine_find_lowest_level_cache_at_topo_level(
+                ms, &bottom_node, CPU_TOPOLOGY_LEVEL_SOCKET);

See my comment in patch 3/8 about the similar semantics of
machine_defines_cache_at_topo_level() and
machine_find_lowest_level_cache_at_topo_level().
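
Just to illustrate the point, the two calls could perhaps be folded into
a single helper (hypothetical sketch, names are mine):

static bool machine_lowest_cache_at_topo_level(MachineState *ms,
                                               int *lowest,
                                               CpuTopologyLevel level)
{
    if (!machine_defines_cache_at_topo_level(ms, level)) {
        return false;
    }
    return machine_find_lowest_level_cache_at_topo_level(ms, lowest, level);
}

so the callers wouldn't need to track 'cache_available' and 'llevel'
separately.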


+            if (cache_available && llevel) {
+                build_caches(table_data, pptt_start, num_caches, caches,
+                             top_node, bottom_node, &node_data_offset,
+                             &node_instr_offset);
+                priv_rsrc[0] = node_instr_offset;
+                priv_rsrc[1] = node_data_offset;
+                if (node_instr_offset || node_data_offset) {
+                    num_priv = node_instr_offset == node_data_offset ? 1 : 2;
+                }
+
+                top_cluster = bottom_node - 1;
+            }
+
              socket_offset = table_data->len - pptt_start;
              build_processor_hierarchy_node(table_data,
                  (1 << 0) | /* Physical package */
                  (1 << 4), /* Identical Implementation */
-                root_offset, socket_id, NULL, 0);
+                root_offset, socket_id, priv_rsrc, num_priv);
          }

          if (mc->smp_props.clusters_supported && mc->smp_props.has_clusters) {
@@ -2204,21 +2341,68 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                  assert(cpus->cpus[n].props.cluster_id > cluster_id);
                  cluster_id = cpus->cpus[n].props.cluster_id;
                  core_id = -1;
+                bottom_cluster = top_cluster;
+                num_priv = 0;
+                cache_available = machine_defines_cache_at_topo_level(
+                    ms, CPU_TOPOLOGY_LEVEL_CLUSTER);
+                llevel = machine_find_lowest_level_cache_at_topo_level(
+                    ms, &bottom_cluster, CPU_TOPOLOGY_LEVEL_CLUSTER);
+
+                if (cache_available && llevel) {
+                    build_caches(table_data, pptt_start, num_caches, caches,
+                                 top_cluster, bottom_cluster,
+                                 &cluster_data_offset, &cluster_instr_offset);
+                    priv_rsrc[0] = cluster_instr_offset;
+                    priv_rsrc[1] = cluster_data_offset;
+                    if (cluster_instr_offset || cluster_data_offset) {
+                        num_priv =
+                            cluster_instr_offset == cluster_data_offset ? 1 : 2;
+                    }
+                    top_core = bottom_cluster - 1;
+                } else if (top_cluster == bottom_node - 1) {
+                    /* socket cache but no cluster cache */
+                    top_core = bottom_node - 1;
+                }
+
                  cluster_offset = table_data->len - pptt_start;
                  build_processor_hierarchy_node(table_data,
                      (0 << 0) | /* Not a physical package */
                      (1 << 4), /* Identical Implementation */
-                    socket_offset, cluster_id, NULL, 0);
+                    socket_offset, cluster_id, priv_rsrc, num_priv);
              }
          } else {
+            if (machine_defines_cache_at_topo_level(
+                    ms, CPU_TOPOLOGY_LEVEL_CLUSTER)) {
+                error_setg(&error_fatal, "No clusters found for the cache");
+                return;
+            }
+
              cluster_offset = socket_offset;
+            top_core = bottom_node - 1; /* there is no cluster */
+        }
+
+        if (cpus->cpus[n].props.core_id != core_id) {
+            bottom_core = top_core;
+            num_priv = 0;
+            cache_available = machine_defines_cache_at_topo_level(
+                ms, CPU_TOPOLOGY_LEVEL_CORE);
+            llevel = machine_find_lowest_level_cache_at_topo_level(
+                ms, &bottom_core, CPU_TOPOLOGY_LEVEL_CORE);
+            if (cache_available && llevel) {
+                build_caches(table_data, pptt_start, num_caches, caches,
+                             top_core, bottom_core, &core_data_offset,
+                             &core_instr_offset);
+                priv_rsrc[0] = core_instr_offset;
+                priv_rsrc[1] = core_data_offset;
+                num_priv = core_instr_offset == core_data_offset ? 1 : 2;
+            }
          }

          if (ms->smp.threads == 1) {
              build_processor_hierarchy_node(table_data,
                  (1 << 1) | /* ACPI Processor ID valid */
-                (1 << 3),  /* Node is a Leaf */
-                cluster_offset, n, NULL, 0);
+                (1 << 3), /* Node is a Leaf */
+                cluster_offset, n, priv_rsrc, num_priv);
          } else {
              if (cpus->cpus[n].props.core_id != core_id) {
                  assert(cpus->cpus[n].props.core_id > core_id);
@@ -2227,7 +2411,7 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                  build_processor_hierarchy_node(table_data,
                      (0 << 0) | /* Not a physical package */
                      (1 << 4), /* Identical Implementation */
-                    cluster_offset, core_id, NULL, 0);
+                    cluster_offset, core_id, priv_rsrc, num_priv);
              }

          build_processor_hierarchy_node(table_data,
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index a655d1e42b..a6ad8cba1b 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -1255,6 +1255,10 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
      unsigned dsdt, xsdt;
      GArray *tables_blob = tables->table_data;
      MachineState *ms = MACHINE(vms);
+    CPUCoreCaches caches[CPU_MAX_CACHES];
+    unsigned int num_caches;
+
+    num_caches = virt_get_caches(vms, caches);

      table_offsets = g_array_new(false, true /* clear */,
                                          sizeof(uint32_t));
@@ -1276,8 +1280,8 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)

      if (!vmc->no_cpu_topology) {
          acpi_add_table(table_offsets, tables_blob);
-        build_pptt(tables_blob, tables->linker, ms,
-                   vms->oem_id, vms->oem_table_id, 0, NULL);
+        build_pptt(tables_blob, tables->linker, ms, vms->oem_id,
+                   vms->oem_table_id, num_caches, caches);
      }

      acpi_add_table(table_offsets, tables_blob);
diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h
index 2809dd8a91..04c821d2b9 100644
--- a/include/hw/acpi/cpu.h
+++ b/include/hw/acpi/cpu.h
@@ -69,6 +69,16 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,

  void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list);

+struct CPUPPTTCaches {
+    enum CacheType type;
+    uint32_t sets;
+    uint32_t size;
+    uint32_t level;
+    uint16_t linesize;
+    uint8_t attributes; /* write policy: 0x0 write back, 0x1 write through */
+    uint8_t associativity;
+};
+
  extern const VMStateDescription vmstate_cpu_hotplug;
  #define VMSTATE_CPU_HOTPLUG(cpuhp, state) \
      VMSTATE_STRUCT(cpuhp, state, 1, \

Otherwise, the ACPI table generation looks good to me.


Cheers,
Gustavo
