From: Eduardo Habkost
Always initialize CPUCaches structs with cache information, even
if legacy_cache=true. Use different CPUCaches struct for
CPUID[2], CPUID[4], and the AMD CPUID leaves.
This will greatly simplify the logic inside cpu_x86_cpuid().
Signed-off-by: Eduardo Habkost
Signed-off-by: Babu Moger
---
target/i386/cpu.c | 117 +++---
target/i386/cpu.h | 14 ---
2 files changed, 67 insertions(+), 64 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index d95310f..5c9bdc9 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1114,7 +1114,7 @@ struct X86CPUDefinition {
};
static CPUCaches epyc_cache_info = {
-.l1d_cache = {
+.l1d_cache = &(CPUCacheInfo) {
.type = DCACHE,
.level = 1,
.size = 32 * KiB,
@@ -1126,7 +1126,7 @@ static CPUCaches epyc_cache_info = {
.self_init = 1,
.no_invd_sharing = true,
},
-.l1i_cache = {
+.l1i_cache = &(CPUCacheInfo) {
.type = ICACHE,
.level = 1,
.size = 64 * KiB,
@@ -1138,7 +1138,7 @@ static CPUCaches epyc_cache_info = {
.self_init = 1,
.no_invd_sharing = true,
},
-.l2_cache = {
+.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.level = 2,
.size = 512 * KiB,
@@ -1148,7 +1148,7 @@ static CPUCaches epyc_cache_info = {
.sets = 1024,
.lines_per_tag = 1,
},
-.l3_cache = {
+.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.level = 3,
.size = 8 * MiB,
@@ -3342,9 +3342,8 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
env->features[w] = def->features[w];
}
-/* Store Cache information from the X86CPUDefinition if available */
-env->cache_info = def->cache_info;
-cpu->legacy_cache = def->cache_info ? 0 : 1;
+/* legacy-cache defaults to 'off' if CPU model provides cache info */
+cpu->legacy_cache = !def->cache_info;
/* Special cases not set in the X86CPUDefinition structs: */
/* TODO: in-kernel irqchip for hvf */
@@ -3695,21 +3694,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
if (!cpu->enable_l3_cache) {
*ecx = 0;
} else {
-if (env->cache_info && !cpu->legacy_cache) {
-*ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache);
-} else {
-*ecx = cpuid2_cache_descriptor(&legacy_l3_cache);
-}
-}
-if (env->cache_info && !cpu->legacy_cache) {
-*edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) |
-       (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) |
-       (cpuid2_cache_descriptor(&env->cache_info->l2_cache));
-} else {
-*edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) |
-       (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) |
-       (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2));
+*ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
}
+*edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
+       (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
+       (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
break;
case 4:
/* cache info: needed for Core compatibility */
@@ -3722,35 +3711,27 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
} else {
*eax = 0;
-CPUCacheInfo *l1d, *l1i, *l2, *l3;
-if (env->cache_info && !cpu->legacy_cache) {
-l1d = &env->cache_info->l1d_cache;
-l1i = &env->cache_info->l1i_cache;
-l2 = &env->cache_info->l2_cache;
-l3 = &env->cache_info->l3_cache;
-} else {
-l1d = &legacy_l1d_cache;
-l1i = &legacy_l1i_cache;
-l2 = &legacy_l2_cache;
-l3 = &legacy_l3_cache;
-}
switch (count) {
case 0: /* L1 dcache info */
-encode_cache_cpuid4(l1d, 1, cs->nr_cores,
+encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
+1, cs->nr_cores,
eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
-encode_cache_cpuid4(l1i, 1, cs->nr_cores,
+encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
+1, cs->nr_cores,
eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
-encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores,
+encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
+