On Fri, Feb 13, 2026, Jim Mattson wrote:
> On Fri, Feb 13, 2026 at 2:19 PM Sean Christopherson <[email protected]> wrote:
> > > > > > +           vmcb_set_gpat(svm->vmcb01.ptr, data);
> > > > > > +           if (is_guest_mode(&svm->vcpu) && !nested_npt_enabled(svm))
> > > > > > +                   vmcb_set_gpat(svm->nested.vmcb02.ptr, data);
> > > > > > +   }
> > > > > > +}
> > > > >
> > > > > Is it me, or is it a bit confusing that svm_set_gpat() sets L2's gPAT
> > > > > not L1's, and svm_set_hpat() calls vmcb_set_gpat()?
> > > >
> > > > It's not just you.  I don't find it confusing per se, more that it's really
> > > > subtle.
> > > >
> > > > > "gpat" means different things in the context of the VMCB or otherwise,
> > > > > which kinda makes sense but is also not super clear. Maybe
> > > > > svm_set_l1_gpat() and svm_set_l2_gpat() is more clear?
> > > >
> > > > I think just svm_set_l1_pat() and svm_set_l2_pat(), because gpat straight up
> > > > doesn't exist when NPT is disabled/unsupported.
> > >
> > > My intention was that "gpat" and "hpat" were from the perspective of the 
> > > vCPU.
> > >
> > > I dislike svm_set_l1_pat() and svm_set_l2_pat(). As you point out
> > > above, there is no independent L2 PAT when nested NPT is disabled. I
> > > think that's less obvious than the fact that there is no gPAT from the
> > > vCPU's perspective. My preference is to follow the APM terminology
> > > when possible. Making up our own terms just leads to confusion.
> >
> > How about svm_set_pat() and svm_set_gpat()?  Because hPAT doesn't exist when
> > NPT is unsupported/disabled, but KVM still needs to set the vCPU's emulated
> > PAT value.
> 
> What if we don't break it up this way at all? Instead of distributing
> the logic between svm_[gs]et_msr() and a few helper functions, we
> could just have svm_[gs]et_msr() call svm_[gs]et_pat(), and all of the
> logic can go in these two functions.

I like it.  And AFAICT it largely Just Works, because the calls from
svm_set_nested_state() will always be routed to gpat since the calls are already
guarded with is_guest_mode() + nested_npt_enabled().
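
To spell out the routing (an illustrative trace, not code from the patch;
it assumes guest mode has already been entered by the time
svm_set_nested_state() restores gPAT):

  svm_set_pat(vcpu, kvm_state->hdr.svm.gpat, false)     /* restore path */
    -> svm_is_access_to_gpat() == true   (guest mode + nested NPT)
    -> writes L2's gPAT in vmcb02, leaves vcpu->arch.pat untouched

  svm_set_pat(vcpu, data, msr->host_initiated == true)  /* KVM_SET_MSRS */
    -> svm_is_access_to_gpat() == false
    -> writes vcpu->arch.pat and, with NPT enabled, vmcb01's gPAT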

Side topic, either as a prep patch (to duplicate code) or as a follow-up patch
(to move the PAT handling in x86.c to vmx.c), the "common" handling of PAT
should be fully forked between VMX and SVM.  As of this patch, it's not just
misleading, it's actively dangerous since calling kvm_get_msr_common() for SVM
would get the wrong value.
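
To make the danger concrete, here's a minimal sketch (hypothetical helper
names, not from the patch) of the disagreement; the common code can only ever
return L1's PAT, while SVM has to special-case L2 reads under nested NPT:

static u64 pat_read_common_sketch(struct kvm_vcpu *vcpu)
{
        /* Common code has no notion of gPAT; this is always L1's PAT. */
        return vcpu->arch.pat;
}

static u64 pat_read_svm_sketch(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /* A guest read while L2 is active with nested NPT must see L2's gPAT. */
        if (is_guest_mode(vcpu) && nested_npt_enabled(svm))
                return svm->nested.save.g_pat;

        return vcpu->arch.pat;
}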

FWIW, this is what I ended up with when hacking on top of your patches to see
how this played out.

---
 arch/x86/kvm/svm/nested.c |  4 +--
 arch/x86/kvm/svm/svm.c    | 64 +++++++++++++++++++++++++--------------
 arch/x86/kvm/svm/svm.h    | 19 +-----------
 arch/x86/kvm/vmx/vmx.c    | 10 ++++--
 arch/x86/kvm/x86.c        |  9 ------
 5 files changed, 51 insertions(+), 55 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index d854d29b0bd8..361f189d3967 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -2075,9 +2075,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        if (nested_npt_enabled(svm)) {
                if (kvm_state->hdr.svm.flags & KVM_STATE_SVM_VALID_GPAT) {
-                       svm_set_gpat(svm, kvm_state->hdr.svm.gpat);
+                       svm_set_pat(vcpu, kvm_state->hdr.svm.gpat, false);
                } else {
-                       svm_set_gpat(svm, vcpu->arch.pat);
+                       svm_set_pat(vcpu, vcpu->arch.pat, false);
                        svm->nested.legacy_gpat_semantics = true;
                }
        }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 93ce0c3232c6..94c3b3cadd54 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -251,6 +251,44 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        return 0;
 }
 
+static bool svm_is_access_to_gpat(struct kvm_vcpu *vcpu, bool host_initiated)
+{
+       /*
+        * When nested NPT is enabled, L2 has a separate PAT from L1.  Guest
+        * accesses to IA32_PAT while running L2 target L2's gPAT;
+        * host-initiated accesses always target L1's hPAT for backward and
+        * forward KVM_{G,S}ET_MSRS compatibility with older kernels.
+        */
+       WARN_ON_ONCE(host_initiated && vcpu->wants_to_run);
+
+       return !host_initiated && is_guest_mode(vcpu) &&
+              nested_npt_enabled(to_svm(vcpu));
+}
+
+void svm_set_pat(struct kvm_vcpu *vcpu, u64 pat, bool host_initiated)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (svm_is_access_to_gpat(vcpu, host_initiated)) {
+               svm->nested.save.g_pat = pat;
+               vmcb_set_gpat(svm->nested.vmcb02.ptr, pat);
+               return;
+       }
+
+       svm->vcpu.arch.pat = pat;
+
+       if (!npt_enabled)
+               return;
+
+       vmcb_set_gpat(svm->vmcb01.ptr, pat);
+
+       if (svm->nested.legacy_gpat_semantics) {
+               svm->nested.save.g_pat = pat;
+               vmcb_set_gpat(svm->nested.vmcb02.ptr, pat);
+       } else if (is_guest_mode(&svm->vcpu) && !nested_npt_enabled(svm)) {
+               vmcb_set_gpat(svm->nested.vmcb02.ptr, pat);
+       }
+}
+
 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -2838,16 +2876,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = svm->msr_decfg;
                break;
        case MSR_IA32_CR_PAT:
-               /*
-                * When nested NPT is enabled, L2 has a separate PAT from
-                * L1.  Guest accesses to IA32_PAT while running L2 target
-                * L2's gPAT; host-initiated accesses always target L1's
-                * hPAT for backward and forward KVM_GET_MSRS compatibility
-                * with older kernels.
-                */
-               WARN_ON_ONCE(msr_info->host_initiated && vcpu->wants_to_run);
-               if (!msr_info->host_initiated && is_guest_mode(vcpu) &&
-                   nested_npt_enabled(svm))
+               if (svm_is_access_to_gpat(vcpu, msr_info->host_initiated))
                        msr_info->data = svm->nested.save.g_pat;
                else
                        msr_info->data = vcpu->arch.pat;
@@ -2937,19 +2966,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
        case MSR_IA32_CR_PAT:
                if (!kvm_pat_valid(data))
                        return 1;
-               /*
-                * When nested NPT is enabled, L2 has a separate PAT from
-                * L1.  Guest accesses to IA32_PAT while running L2 target
-                * L2's gPAT; host-initiated accesses always target L1's
-                * hPAT for backward and forward KVM_SET_MSRS compatibility
-                * with older kernels.
-                */
-               WARN_ON_ONCE(msr->host_initiated && vcpu->wants_to_run);
-               if (!msr->host_initiated && is_guest_mode(vcpu) &&
-                   nested_npt_enabled(svm))
-                       svm_set_gpat(svm, data);
-               else
-                       svm_set_hpat(svm, data);
+
+               svm_set_pat(vcpu, data, msr->host_initiated);
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr->host_initiated &&
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0bb9fdcb489d..71502db3f679 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -616,24 +616,6 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
        return svm->nested.ctl.misc_ctl & SVM_MISC_ENABLE_NP;
 }
 
-static inline void svm_set_gpat(struct vcpu_svm *svm, u64 data)
-{
-       svm->nested.save.g_pat = data;
-       vmcb_set_gpat(svm->nested.vmcb02.ptr, data);
-}
-
-static inline void svm_set_hpat(struct vcpu_svm *svm, u64 data)
-{
-       svm->vcpu.arch.pat = data;
-       if (npt_enabled) {
-               vmcb_set_gpat(svm->vmcb01.ptr, data);
-               if (is_guest_mode(&svm->vcpu) && !nested_npt_enabled(svm))
-                       vmcb_set_gpat(svm->nested.vmcb02.ptr, data);
-       }
-       if (svm->nested.legacy_gpat_semantics)
-               svm_set_gpat(svm, data);
-}
-
 static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
 {
        return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
@@ -780,6 +762,7 @@ void svm_enable_lbrv(struct kvm_vcpu *vcpu);
 void svm_update_lbrv(struct kvm_vcpu *vcpu);
 
 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+void svm_set_pat(struct kvm_vcpu *vcpu, u64 pat, bool host_initiated);
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 void disable_nmi_singlestep(struct vcpu_svm *svm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 967b58a8ab9d..546056e690eb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2141,6 +2141,9 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 #endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_info);
+       case MSR_IA32_CR_PAT:
+               msr_info->data = vcpu->arch.pat;
+               break;
        case MSR_IA32_TSX_CTRL:
                if (!msr_info->host_initiated &&
                    !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
@@ -2468,9 +2471,10 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                goto find_uret_msr;
        case MSR_IA32_CR_PAT:
-               ret = kvm_set_msr_common(vcpu, msr_info);
-               if (ret)
-                       break;
+               if (!kvm_pat_valid(data))
+                       return 1;
+
+               vcpu->arch.pat = data;
 
                if (is_guest_mode(vcpu) &&
                    get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 416899b5dbe4..41936f83a17f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4025,12 +4025,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                }
                break;
-       case MSR_IA32_CR_PAT:
-               if (!kvm_pat_valid(data))
-                       return 1;
-
-               vcpu->arch.pat = data;
-               break;
        case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
                return kvm_mtrr_set_msr(vcpu, msr, data);
@@ -4436,9 +4430,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
                break;
        }
-       case MSR_IA32_CR_PAT:
-               msr_info->data = vcpu->arch.pat;
-               break;
        case MSR_MTRRcap:
        case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:

base-commit: 7539434a6984ba5accfdd8e296fb834558f95df4
--