...
> TODO:
> - This patch adds is_host_compat_vendor(), while there is something
> like is_host_cpu_intel() from target/i386/kvm/vmsr_energy.c. A rework
> may help move those helpers to target/i386/cpu*.
vmsr_energy emulates RAPL in user space... but RAPL is not architectural
(it has no CPUID enumeration), so that case doesn't need to consider a
"compat" vendor.
> target/i386/cpu.h | 8 ++
> target/i386/kvm/kvm.c | 176 +++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 180 insertions(+), 4 deletions(-)
...
> +static bool is_host_compat_vendor(CPUX86State *env)
> +{
> + char host_vendor[CPUID_VENDOR_SZ + 1];
> + uint32_t host_cpuid_vendor1;
> + uint32_t host_cpuid_vendor2;
> + uint32_t host_cpuid_vendor3;
>
> + host_cpuid(0x0, 0, NULL, &host_cpuid_vendor1, &host_cpuid_vendor3,
> + &host_cpuid_vendor2);
> +
> + x86_cpu_vendor_words2str(host_vendor, host_cpuid_vendor1,
> + host_cpuid_vendor2, host_cpuid_vendor3);
We could use host_cpu_vendor_fms() here (with a small change). If you
like this idea, please feel free to pick my cleanup patch into your
series.
> + /*
> + * Intel and Zhaoxin are compatible.
> + */
> + if ((g_str_equal(host_vendor, CPUID_VENDOR_INTEL) ||
> + g_str_equal(host_vendor, CPUID_VENDOR_ZHAOXIN1) ||
> + g_str_equal(host_vendor, CPUID_VENDOR_ZHAOXIN2)) &&
> + (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) {
> + return true;
> + }
> +
> + return env->cpuid_vendor1 == host_cpuid_vendor1 &&
> + env->cpuid_vendor2 == host_cpuid_vendor2 &&
> + env->cpuid_vendor3 == host_cpuid_vendor3;
Checking AMD directly makes the "compat" rule clear:
return g_str_equal(host_vendor, CPUID_VENDOR_AMD) &&
IS_AMD_CPU(env);
> +}
...
> if (env->mcg_cap) {
> kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
> kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
> @@ -4871,6 +5024,21 @@ static int kvm_get_msrs(X86CPU *cpu)
> case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
> env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
> break;
> + case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL0 + AMD64_NUM_COUNTERS - 1:
> + env->msr_gp_evtsel[index - MSR_K7_EVNTSEL0] = msrs[i].data;
> + break;
> + case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR0 + AMD64_NUM_COUNTERS - 1:
> + env->msr_gp_counters[index - MSR_K7_PERFCTR0] = msrs[i].data;
> + break;
> + case MSR_F15H_PERF_CTL0 ...
> + MSR_F15H_PERF_CTL0 + AMD64_NUM_COUNTERS_CORE * 2 - 1:
> + index = index - MSR_F15H_PERF_CTL0;
> + if (index & 0x1) {
> + env->msr_gp_counters[index] = msrs[i].data;
> + } else {
> + env->msr_gp_evtsel[index] = msrs[i].data;
This msr_gp_evtsel[] array's size is 18:
#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
This formula is based on Intel's MSR layout; it would be best to add a
note that the current size also meets AMD's needs. (There's no need to
adjust the size itself, as changing it would affect migration.)
> + }
> + break;
> case HV_X64_MSR_HYPERCALL:
> env->msr_hv_hypercall = msrs[i].data;
> break;
Others LGTM!
Thanks,
Zhao