From: Isaila Alexandru <aisa...@bitdefender.com>

This patch adds access rights for the NPT pages. The access rights are
stored in bits 62:59 of the pte and are manipulated through the
p2m_set_access() and p2m_get_access() functions.
The patch follows the EPT logic.

Note: it was tested with xen-access write.
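(That is the test harness under tools/tests/xen-access/; assuming the
guest is domain 1, a typical invocation would be "xen-access 1 write".)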

Signed-off-by: Alexandru Isaila <aisa...@bitdefender.com>
---
 xen/arch/x86/mm/mem_access.c      |  4 +-
 xen/arch/x86/mm/p2m-pt.c          | 87 ++++++++++++++++++++++++++++++++-------
 xen/include/asm-x86/mem_access.h  |  2 +-
 xen/include/asm-x86/x86_64/page.h |  9 +++++
 4 files changed, 85 insertions(+), 17 deletions(-)
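
For reviewers, here is a minimal standalone sketch (not part of the patch)
of how the new PAGE_ACCESS_* constants pack a p2m_access_t value into pte
bits 62:59 and recover it again, mirroring p2m_set_access() and
p2m_get_access(). The constants and the two helpers are as added by the
patch; the p2m_access_t stand-in enum, the example pte value and the
main() harness are illustrative assumptions only, compilable outside the
Xen tree:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t intpte_t;

    /* Constants as added to xen/include/asm-x86/x86_64/page.h. */
    #define PAGE_ACCESS_START          59
    #define PAGE_ACCESS_LEN            4
    #define PAGE_ACCESS_MASK           ((1UL << PAGE_ACCESS_LEN) - 1)
    #define PAGE_ACCESS_BITFIELD_MASK  (PAGE_ACCESS_MASK << PAGE_ACCESS_START)

    /* Stand-in for the first few p2m_access_t values (assumed ordering). */
    typedef enum {
        p2m_access_n, p2m_access_r, p2m_access_w, p2m_access_rw
    } p2m_access_t;

    static void p2m_set_access(intpte_t *entry, p2m_access_t access)
    {
        /* Clear bits 62:59, then store the 4-bit access value there. */
        *entry = (*entry & ~PAGE_ACCESS_BITFIELD_MASK) |
            (((intpte_t)access & PAGE_ACCESS_MASK) << PAGE_ACCESS_START);
    }

    static p2m_access_t p2m_get_access(intpte_t entry)
    {
        return (p2m_access_t)((entry >> PAGE_ACCESS_START) & PAGE_ACCESS_MASK);
    }

    int main(void)
    {
        intpte_t pte = 0x123456067UL;   /* arbitrary example pte */

        p2m_set_access(&pte, p2m_access_rw);
        printf("pte %#llx -> access %u\n",
               (unsigned long long)pte, (unsigned)p2m_get_access(pte));
        return 0;
    }

Since only bits 62:59 are touched, the stored access co-exists with the
low pte flags and the p2m type kept in other software-available bits, and
p2m_type_to_flags() then folds the access back into the hardware-visible
NX/RW bits.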

diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index c0cd017..6fb7809 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -221,7 +221,9 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
         {
             req->u.mem_access.flags |= MEM_ACCESS_GLA_VALID;
             req->u.mem_access.gla = gla;
-
+        }
+        if ( npfec.gla_valid || cpu_has_svm )
+        {
             if ( npfec.kind == npfec_kind_with_gla )
                 req->u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA;
             else if ( npfec.kind == npfec_kind_in_gpt )
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index b8c5d2e..28e6718 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -68,7 +68,8 @@
 static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                        p2m_type_t t,
                                        mfn_t mfn,
-                                       unsigned int level)
+                                       unsigned int level,
+                                       p2m_access_t access)
 {
     unsigned long flags;
     /*
@@ -87,23 +88,28 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
     case p2m_ram_paged:
     case p2m_ram_paging_in:
     default:
-        return flags | _PAGE_NX_BIT;
+        flags |= _PAGE_NX_BIT;
+        break;
     case p2m_grant_map_ro:
-        return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_NX_BIT;
+        break;
     case p2m_ioreq_server:
         flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
         if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
-            return flags & ~_PAGE_RW;
-        return flags;
+            flags &= ~_PAGE_RW;
+        break;
     case p2m_ram_ro:
     case p2m_ram_logdirty:
     case p2m_ram_shared:
-        return flags | P2M_BASE_FLAGS;
+        flags |= P2M_BASE_FLAGS;
+        break;
     case p2m_ram_rw:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW;
+        break;
     case p2m_grant_map_rw:
     case p2m_map_foreign:
-        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
+        break;
     case p2m_mmio_direct:
         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
             flags |= _PAGE_RW;
@@ -112,8 +118,38 @@ static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
             flags |= _PAGE_PWT;
             ASSERT(!level);
         }
-        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
+        flags |= P2M_BASE_FLAGS | _PAGE_PCD;
+        break;
     }
+
+    switch ( access )
+    {
+    case p2m_access_r:
+    case p2m_access_w:
+        flags |= _PAGE_NX_BIT;
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rw:
+        flags |= _PAGE_NX_BIT;
+        break;
+    case p2m_access_n:
+    case p2m_access_n2rwx:
+        flags |= _PAGE_NX_BIT;
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rx:
+    case p2m_access_wx:
+    case p2m_access_rx2rw:
+        flags &= ~(_PAGE_NX_BIT | _PAGE_RW);
+        break;
+    case p2m_access_x:
+        flags &= ~_PAGE_RW;
+        break;
+    case p2m_access_rwx:
+    default:
+        break;
+    }
+    return flags;
 }
 
 
@@ -174,6 +210,17 @@ static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
         l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
 }
 
+static void p2m_set_access(intpte_t *entry, p2m_access_t access)
+{
+    *entry = (*entry & ~PAGE_ACCESS_BITFIELD_MASK) |
+        ((access & PAGE_ACCESS_MASK) << PAGE_ACCESS_START);
+}
+
+static p2m_access_t p2m_get_access(intpte_t entry)
+{
+    return (p2m_access_t)((entry >> PAGE_ACCESS_START) & PAGE_ACCESS_MASK);
+}
+
 /* Returns: 0 for success, -errno for failure */
 static int
 p2m_next_level(struct p2m_domain *p2m, void **table,
@@ -201,6 +248,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
 
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m_set_access(&new_entry.l1, p2m->default_access);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else if ( flags & _PAGE_PSE )
@@ -249,6 +297,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         {
             new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                      flags);
+            p2m_set_access(&new_entry.l1, p2m->default_access);
             p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
         }
 
@@ -256,6 +305,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
 
         new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
         p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
+        p2m_set_access(&new_entry.l1, p2m->default_access);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
     }
     else
@@ -420,8 +470,9 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
         if ( nt != ot )
         {
             unsigned long mfn = l1e_get_pfn(e);
+            p2m_access_t a = p2m_get_access(e.l1);
             unsigned long flags = p2m_type_to_flags(p2m, nt,
-                                                    _mfn(mfn), level);
+                                                    _mfn(mfn), level, a);
 
             if ( level )
             {
@@ -569,13 +620,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
         l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l3e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 2, p2ma))
             : l3e_empty();
         entry_content.l1 = l3e_content.l3;
 
         if ( entry_content.l1 != 0 )
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
 
+        p2m_set_access(&entry_content.l1, p2ma);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
@@ -608,7 +660,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
 
         if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
             entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
-                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
+                                         p2m_type_to_flags(p2m, p2mt, mfn, 0, p2ma));
         else
             entry_content = l1e_empty();
 
@@ -630,6 +682,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
             p2m->ioreq.entry_count--;
         }
 
+        p2m_set_access(&entry_content.l1, p2ma);
         /* level 1 entry */
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
@@ -661,13 +714,14 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
         ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
         l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
             ? p2m_l2e_from_pfn(mfn_x(mfn),
-                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
+                               p2m_type_to_flags(p2m, p2mt, mfn, 1, p2ma))
             : l2e_empty();
         entry_content.l1 = l2e_content.l2;
 
         if ( entry_content.l1 != 0 )
             p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);
 
+        p2m_set_access(&entry_content.l1, p2ma);
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
         /* NB: paging_write_p2m_entry() handles tlb flushes properly */
     }
@@ -750,7 +804,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
      * XXX we will return p2m_invalid for unmapped gfns */
     *t = p2m_mmio_dm;
-    /* Not implemented except with EPT */
-    *a = p2m_access_rwx; 
+    /* Unmapped gfns default to the most restrictive access */
+    *a = p2m_access_n;
 
     if ( gfn > p2m->max_mapped_pfn )
     {
@@ -813,6 +867,7 @@ pod_retry_l3:
                        l1_table_offset(addr));
             *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                  p2m_flags_to_type(flags), p2m, gfn);
+            *a = p2m_get_access(l3e->l3);
             unmap_domain_page(l3e);
 
             ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -852,6 +907,7 @@ pod_retry_l2:
         mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
         *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                              p2m_flags_to_type(flags), p2m, gfn);
+        *a = p2m_get_access(l2e->l2);
         unmap_domain_page(l2e);
         
         ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
@@ -888,6 +944,7 @@ pod_retry_l1:
     }
     mfn = l1e_get_mfn(*l1e);
     *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
+    *a = p2m_get_access(l1e->l1);
     unmap_domain_page(l1e);
 
     ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
index 4043c9f..34f2c07 100644
--- a/xen/include/asm-x86/mem_access.h
+++ b/xen/include/asm-x86/mem_access.h
@@ -46,7 +46,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
 /* Sanity check for mem_access hardware support */
 static inline bool p2m_mem_access_sanity_check(struct domain *d)
 {
-    return is_hvm_domain(d) && cpu_has_vmx && hap_enabled(d);
+    return is_hvm_domain(d) && hap_enabled(d);
 }
 
 #endif /*__ASM_X86_MEM_ACCESS_H__ */
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 05a0334..d5f5418 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -159,6 +159,15 @@ static inline intpte_t put_pte_flags(unsigned int x)
  */
 #define _PAGE_GUEST_KERNEL (1U<<12)
 
+/*
+ * Bits 22:19 of a 24-bit flag mask are used to store p2m_access_t.
+ * This corresponds to bits 62:59 of a pte.
+ */
+#define PAGE_ACCESS_START          59
+#define PAGE_ACCESS_LEN            4
+#define PAGE_ACCESS_MASK           ((1UL << PAGE_ACCESS_LEN) - 1)
+#define PAGE_ACCESS_BITFIELD_MASK  (PAGE_ACCESS_MASK << PAGE_ACCESS_START)
+
 #define PAGE_HYPERVISOR_RO      (__PAGE_HYPERVISOR_RO      | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR_RW      (__PAGE_HYPERVISOR_RW      | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR_RX      (__PAGE_HYPERVISOR_RX      | _PAGE_GLOBAL)
-- 
2.7.4

