On 3/12/24 08:54, Jan Beulich wrote:
On 11.03.2024 13:40, Vaishali Thakkar wrote:
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -571,7 +571,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
      if ( nestedhvm_paging_mode_hap(v) )
      {
          /* host nested paging + guest nested paging. */
-        n2vmcb->_np_enable = 1;
+        n2vmcb->_np = true;
          nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);
@@ -585,7 +585,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
      else if ( paging_mode_hap(v->domain) )
      {
          /* host nested paging + guest shadow paging. */
-        n2vmcb->_np_enable = 1;
+        n2vmcb->_np = true;
          /* Keep h_cr3 as it is. */
          n2vmcb->_h_cr3 = n1vmcb->_h_cr3;
          /* When l1 guest does shadow paging
@@ -601,7 +601,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
      else
      {
          /* host shadow paging + guest shadow paging. */
-        n2vmcb->_np_enable = 0;
+        n2vmcb->_np = false;
          n2vmcb->_h_cr3 = 0x0;
          /* TODO: Once shadow-shadow paging is in place come back to here
@@ -706,7 +706,7 @@ nsvm_vcpu_vmentry(struct vcpu *v, struct cpu_user_regs *regs,
      }
      /* nested paging for the guest */
-    svm->ns_hap_enabled = !!ns_vmcb->_np_enable;
+    svm->ns_hap_enabled = ns_vmcb->_np;
      /* Remember the V_INTR_MASK in hostflags */
      svm->ns_hostflags.fields.vintrmask = !!ns_vmcb->_vintr.fields.intr_masking;
@@ -1084,7 +1084,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
      if ( nestedhvm_paging_mode_hap(v) )
      {
          /* host nested paging + guest nested paging. */
-        ns_vmcb->_np_enable = n2vmcb->_np_enable;
+        ns_vmcb->_np = n2vmcb->_np;
          ns_vmcb->_cr3 = n2vmcb->_cr3;
          /* The vmcb->h_cr3 is the shadowed h_cr3. The original
           * unshadowed guest h_cr3 is kept in ns_vmcb->h_cr3,
@@ -1093,7 +1093,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
      else if ( paging_mode_hap(v->domain) )
      {
          /* host nested paging + guest shadow paging. */
-        ns_vmcb->_np_enable = 0;
+        ns_vmcb->_np = false;
          /* Throw h_cr3 away. Guest is not allowed to set it or
           * it can break out, otherwise (security hole!) */
          ns_vmcb->_h_cr3 = 0x0;
@@ -1104,7 +1104,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
      else
      {
          /* host shadow paging + guest shadow paging. */
-        ns_vmcb->_np_enable = 0;
+        ns_vmcb->_np = false;
          ns_vmcb->_h_cr3 = 0x0;
          /* The vmcb->_cr3 is the shadowed cr3. The original
           * unshadowed guest cr3 is kept in ns_vmcb->_cr3,

While spotting the small issue below, it occurred to me: Why is it that
vmcb_set_...() is open-coded everywhere here? I think this would be
pretty nice to avoid at the same time (for lines touched anyway, or in
a separate prereq patch, or alternatively [and only ideally] for all
other instances in a follow-on patch). Thoughts?

Yes, I noticed this too. My plan was to send a follow-up patch fixing
all the instances where vmcb_set/get_...() can be used. There are a
bunch of other vmcb bits (apart from the ones being handled in this
patchset) in this file and in svm.c which could benefit from using the
VMCB accessors.
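
To illustrate the direction (just a sketch, not the actual follow-up
patch), the open-coded updates in nsvm_vmcb_prepare4vmrun() above could
then go through the accessor, which also clears the corresponding VMCB
clean bit:

    /* Sketch only: same logic as the open-coded assignments above. */
    if ( nestedhvm_paging_mode_hap(v) )
        vmcb_set_np(n2vmcb, true);   /* host nested + guest nested paging */
    else if ( paging_mode_hap(v->domain) )
        vmcb_set_np(n2vmcb, true);   /* host nested + guest shadow paging */
    else
        vmcb_set_np(n2vmcb, false);  /* host shadow + guest shadow paging */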

--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -473,7 +473,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
      if ( paging_mode_hap(v->domain) )
      {
-        vmcb_set_np_enable(vmcb, 1);
+        vmcb_set_np(vmcb, 1);

No switching to "true" here? (If the answer to the other question is
"No" for whatever reason, I'd nevertheless like to see this one
adjusted, which could then be done while committing.)

Sorry, I missed this instance. I'll fix it if I need to send another
revised patchset for other fixes (based on review comments); otherwise,
feel free to fix it while committing. Thanks.
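
For reference, the adjusted line in svm_vmcb_restore() would then
presumably read:

    vmcb_set_np(vmcb, true);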

Jan
