When the vCPU is in guest mode with nested NPT enabled, guest accesses to
IA32_PAT are redirected to the gPAT register, which is stored in
vmcb02->save.g_pat.
All other accesses to IA32_PAT, including host-initiated accesses from
userspace (e.g. via KVM_GET_MSRS/KVM_SET_MSRS), target hPAT, which is
stored in vcpu->arch.pat.
Redirecting guest accesses in guest mode is the architected behavior.
Always routing host-initiated accesses to hPAT also makes it possible
to restore a checkpoint taken on a new kernel onto an old kernel with
reasonable semantics: after the restore, gPAT is lost, and L2 runs on
L1's PAT. Note that an old kernel would have always run L2 on L1's
PAT anyway.
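
To summarize the resulting routing (illustrative; "guest" here means a
non-host-initiated access):

  Access type      L1 active    L2, nested NPT    L2, no nested NPT
  guest            hPAT         gPAT              hPAT
  host-initiated   hPAT         hPAT              hPAT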
Fixes: 15038e147247 ("KVM: SVM: obey guest PAT")
Signed-off-by: Jim Mattson <[email protected]>
---
arch/x86/kvm/svm/nested.c | 9 ---------
arch/x86/kvm/svm/svm.c | 34 ++++++++++++++++++++++++++++------
arch/x86/kvm/svm/svm.h | 17 ++++++++++++++++-
3 files changed, 44 insertions(+), 16 deletions(-)
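
For illustration only (not part of this patch): a minimal userspace
sketch showing that a host-initiated read via KVM_GET_MSRS always
returns hPAT, regardless of the vCPU's nesting state. The vcpu_fd
setup is assumed, and MSR_IA32_CR_PAT is defined locally since
asm/msr-index.h is not a userspace header.

  #include <err.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  #define MSR_IA32_CR_PAT 0x277

  /* Host-initiated read of IA32_PAT; KVM routes this to hPAT. */
  static __u64 read_hpat(int vcpu_fd)
  {
          struct {
                  struct kvm_msrs hdr;
                  struct kvm_msr_entry entry;
          } msrs = {
                  .hdr.nmsrs = 1,
                  .entry.index = MSR_IA32_CR_PAT,
          };

          /* KVM_GET_MSRS returns the number of MSRs read. */
          if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
                  err(1, "KVM_GET_MSRS(IA32_PAT)");

          return msrs.entry.data;
  }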
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 1ff2ede96094..08844bc51b3c 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -630,15 +630,6 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
return 0;
}

-void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
-{
- if (!svm->nested.vmcb02.ptr)
- return;
-
- /* FIXME: merge g_pat from vmcb01 and vmcb12. */
- svm_set_vmcb_gpat(svm->nested.vmcb02.ptr, svm->vmcb01.ptr->save.g_pat);
-}
-
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
bool new_vmcb12 = false;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 08f145eb9215..b62c32c3942d 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2852,6 +2852,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_DE_CFG:
msr_info->data = svm->msr_decfg;
break;
+ case MSR_IA32_CR_PAT:
+ /*
+ * When nested NPT is enabled, L2 has a separate PAT from L1.
+ * Guest accesses to IA32_PAT while running L2 target L2's gPAT;
+ * host-initiated accesses always target L1's hPAT for backward
+ * and forward KVM_GET_MSRS compatibility with older kernels.
+ */
+ WARN_ON_ONCE(msr_info->host_initiated && vcpu->wants_to_run);
+ if (!msr_info->host_initiated && is_guest_mode(vcpu) &&
+ nested_npt_enabled(svm))
+ msr_info->data = svm->nested.gpat;
+ else
+ msr_info->data = vcpu->arch.pat;
+ break;
default:
return kvm_get_msr_common(vcpu, msr_info);
}
@@ -2935,13 +2949,21 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_IA32_CR_PAT:
- ret = kvm_set_msr_common(vcpu, msr);
- if (ret)
- break;
+ if (!kvm_pat_valid(data))
+ return 1;

- svm_set_vmcb_gpat(svm->vmcb01.ptr, data);
- if (is_guest_mode(vcpu))
- nested_vmcb02_compute_g_pat(svm);
+ /*
+ * When nested NPT is enabled, L2 has a separate PAT from L1.
+ * Guest accesses to IA32_PAT while running L2 target L2's gPAT;
+ * host-initiated accesses always target L1's hPAT for backward
+ * and forward KVM_SET_MSRS compatibility with older kernels.
+ */
+ WARN_ON_ONCE(msr->host_initiated && vcpu->wants_to_run);
+ if (!msr->host_initiated && is_guest_mode(vcpu) &&
+ nested_npt_enabled(svm))
+ svm_set_gpat(svm, data);
+ else
+ svm_set_hpat(svm, data);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 42a4bf83b3aa..a0e94a2c51a1 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -590,6 +590,22 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

+static inline void svm_set_gpat(struct vcpu_svm *svm, u64 data)
+{
+ svm->nested.gpat = data;
+ svm_set_vmcb_gpat(svm->nested.vmcb02.ptr, data);
+}
+
+static inline void svm_set_hpat(struct vcpu_svm *svm, u64 data)
+{
+ svm->vcpu.arch.pat = data;
+ if (npt_enabled) {
+ svm_set_vmcb_gpat(svm->vmcb01.ptr, data);
+ if (is_guest_mode(&svm->vcpu) && !nested_npt_enabled(svm))
+ svm_set_vmcb_gpat(svm->nested.vmcb02.ptr, data);
+ }
+}
+
static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
@@ -816,7 +832,6 @@ void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
-void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
extern struct kvm_x86_nested_ops svm_nested_ops;
--
2.53.0.rc2.204.g2597b5adb4-goog