Re: [PATCH] kvm-unit-tests: VMX: Add the framework of EPT

2013-09-04 Thread Arthur Chunqi Li
Hi Xiao Guangrong, Jun Nakajima, Yang Zhang, Gleb and Paolo,

If you have any ideas about how nested EPT should be tested and which
aspects should be covered, please tell me and I will write the relevant
test cases. Besides, I would be very happy if you could help review this
patch or propose other suggestions.

Thanks very much,
Arthur

On Mon, Sep 2, 2013 at 5:38 PM, Arthur Chunqi Li yzt...@gmail.com wrote:
 There are probably some minor revisions still to be done in this patch, so this
 is mainly an RFC mail.

 Besides, I'm not quite clear on what we should test in the nested EPT
 module, and I expect the authors of nested EPT have ideas on how to
 continue and refine this testing part. Any suggestions on which parts
 of nested EPT to test, and how to test them, are welcome.

 Please help me CC the people involved with EPT, if anyone knows who they are.

 Thanks,
 Arthur


Re: [PATCH] kvm-unit-tests: VMX: Add the framework of EPT

2013-09-02 Thread Arthur Chunqi Li
There are probably some minor revisions still to be done in this patch, so this
is mainly an RFC mail.

Besides, I'm not quite clear on what we should test in the nested EPT
module, and I expect the authors of nested EPT have ideas on how to
continue and refine this testing part. Any suggestions on which parts
of nested EPT to test, and how to test them, are welcome.

Please help me CC the people involved with EPT, if anyone knows who they are.

Thanks,
Arthur

On Mon, Sep 2, 2013 at 5:26 PM, Arthur Chunqi Li yzt...@gmail.com wrote:
 Add a framework for EPT in nested VMX testing, including a set of
 functions to construct and read EPT paging structures, and a simple
 read/write test of EPT remapping from guest to host.

 Signed-off-by: Arthur Chunqi Li yzt...@gmail.com
 ---
  x86/vmx.c   |  132 --
  x86/vmx.h   |   76 +++
  x86/vmx_tests.c |  156 +++
  3 files changed, 360 insertions(+), 4 deletions(-)
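
 [As a rough sketch, not part of this patch, of how the new helpers might be
 combined in a test case: the setup_ept_range()/install_ept() names and the
 EPT_RA/EPT_WA/EPT_EA flags come from the patch below, while the function
 name, the addresses and the host_page variable are only illustrative.]

     /* Illustrative sketch: identity-map the first 2G of guest-physical
      * memory with 4K pages, then remap one guest page onto a separate
      * host page so a guest write can be checked from the host side. */
     static void ept_remap_sketch(void)
     {
         unsigned long *pml4 = alloc_page();      /* EPT PML4 table */
         void *host_page = alloc_page();          /* backing host page */
         unsigned long guest_gpa = 0x10000000ul;  /* arbitrary test GPA */

         memset(pml4, 0, PAGE_SIZE);
         /* 1:1 map 0..2G with 4K pages, read/write/execute */
         setup_ept_range(pml4, 0, 0x80000000ul, 0, 0,
                         EPT_RA | EPT_WA | EPT_EA);
         /* point guest_gpa at host_page instead of its identity mapping */
         install_ept(pml4, virt_to_phys(host_page), guest_gpa,
                     EPT_RA | EPT_WA | EPT_EA);
         /* an EPTP built from pml4 would then be written to the VMCS;
          * after vmlaunch, a guest store to guest_gpa should be visible
          * in host_page when read from the host. */
     }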

 diff --git a/x86/vmx.c b/x86/vmx.c
 index ca36d35..a156b71 100644
 --- a/x86/vmx.c
 +++ b/x86/vmx.c
 @@ -143,6 +143,132 @@ asm(
call hypercall\n\t
  );

 +/* EPT paging structure related functions */
 +/* install_ept_entry : Install an entry at a given level of the EPT paging structure
 +   @pml4 : address of the EPT PML4 table
 +   @pte_level : level of the entry to set
 +   @guest_addr : guest physical address to map
 +   @pte : PTE value to set
 +   @pt_page : page to use when a new page table is needed, NULL to allocate one
 + */
 +void install_ept_entry(unsigned long *pml4,
 +   int pte_level,
 +   unsigned long guest_addr,
 +   unsigned long pte,
 +   unsigned long *pt_page)
 +{
 +   int level;
 +   unsigned long *pt = pml4;
 +   unsigned offset;
 +
 +   for (level = EPT_PAGE_LEVEL; level > pte_level; --level) {
 +   offset = (guest_addr >> ((level-1) * EPT_PGDIR_WIDTH + 12))
 +   & EPT_PGDIR_MASK;
 +   if (!(pt[offset] & (EPT_RA | EPT_WA | EPT_EA))) {
 +   unsigned long *new_pt = pt_page;
 +   if (!new_pt)
 +   new_pt = alloc_page();
 +   else
 +   pt_page = 0;
 +   memset(new_pt, 0, PAGE_SIZE);
 +   pt[offset] = virt_to_phys(new_pt)
 +   | EPT_RA | EPT_WA | EPT_EA;
 +   }
 +   pt = phys_to_virt(pt[offset] & 0xffffffffff000ull);
 +   }
 +   offset = ((unsigned long)guest_addr >> ((level-1) *
 +   EPT_PGDIR_WIDTH + 12)) & EPT_PGDIR_MASK;
 +   pt[offset] = pte;
 +}
 +
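/* (Not part of the patch: a worked example of the index computation in
 * install_ept_entry() above, assuming EPT_PGDIR_WIDTH is 9 and
 * EPT_PGDIR_MASK is 0x1ff, i.e. the usual 9-bit index per paging level.
 * For guest_addr = 0x40201000:
 *
 *   level 4: offset = (0x40201000 >> 39) & 0x1ff = 0   (PML4 index)
 *   level 3: offset = (0x40201000 >> 30) & 0x1ff = 1   (PDPT index)
 *   level 2: offset = (0x40201000 >> 21) & 0x1ff = 1   (PD index)
 *   level 1: offset = (0x40201000 >> 12) & 0x1ff = 1   (PT index)
 *
 * With pte_level == 1 the loop walks the three intermediate tables,
 * allocating any that are missing, and the final assignment writes the
 * 4K pte at PT index 1.) */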
 +/* Map a page, @perm is the permission of the page */
 +void install_ept(unsigned long *pml4,
 +   unsigned long phys,
 +   unsigned long guest_addr,
 +   u64 perm)
 +{
 +   install_ept_entry(pml4, 1, guest_addr, (phys & PAGE_MASK) | perm, 0);
 +}
 +
 +/* Map a 1G-size page */
 +void install_1g_ept(unsigned long *pml4,
 +   unsigned long phys,
 +   unsigned long guest_addr,
 +   u64 perm)
 +{
 +   install_ept_entry(pml4, 3, guest_addr,
 +   (phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
 +}
 +
 +/* Map a 2M-size page */
 +void install_2m_ept(unsigned long *pml4,
 +   unsigned long phys,
 +   unsigned long guest_addr,
 +   u64 perm)
 +{
 +   install_ept_entry(pml4, 2, guest_addr,
 +   (phys & PAGE_MASK) | perm | EPT_LARGE_PAGE, 0);
 +}
 +
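/* (Not part of the patch: the three wrappers above differ only in the
 * level passed to install_ept_entry(): 1 = 4K, 2 = 2M, 3 = 1G, with
 * EPT_LARGE_PAGE set for the two large-page cases.  For example,
 * install_2m_ept(pml4, 0x40000000ul, 0x40000000ul, EPT_RA | EPT_WA | EPT_EA)
 * would install a single 2M identity mapping at 1G, assuming both
 * addresses are 2M-aligned as they are here.) */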
 +/* setup_ept_range : Set up a range of 1:1 (identity) mappings in the
 +   EPT paging structure
 +   @start : guest physical address where the range starts
 +   @len : length of the range to map
 +   @map_1g : whether to use 1G pages where possible
 +   @map_2m : whether to use 2M pages where possible
 +   @perm : permission bits for every page
 + */
 +int setup_ept_range(unsigned long *pml4, unsigned long start,
 +   unsigned long len, int map_1g, int map_2m, u64 perm)
 +{
 +   u64 phys = start;
 +   u64 max = (u64)len + (u64)start;
 +
 +   if (map_1g) {
 +   while (phys + PAGE_SIZE_1G <= max) {
 +   install_1g_ept(pml4, phys, phys, perm);
 +   phys += PAGE_SIZE_1G;
 +   }
 +   }
 +   if (map_2m) {
 +   while (phys + PAGE_SIZE_2M <= max) {
 +   install_2m_ept(pml4, phys, phys, perm);
 +   phys += PAGE_SIZE_2M;
 +   }
 +   }
 +   while (phys + PAGE_SIZE <= max) {
 +   install_ept(pml4, phys, phys, perm);
 +   phys += PAGE_SIZE;
 +   }
 +   return 0;
 +}
 +
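/* (Not part of the patch: an illustrative call.  With perm standing for
 * the desired EPT_* permission bits,
 *
 *   setup_ept_range(pml4, 0, 0x40602000ul, 1, 1, perm);
 *
 * covers a 1G + 6M + 8K identity-mapped range with one 1G mapping at 0,
 * three 2M mappings at 1G, 1G+2M and 1G+4M, and two 4K mappings after
 * that: each loop above falls through to the next smaller page size once
 * the remaining length no longer fits.) */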
 +/* get_ept_pte : Get the PTE at a given level in the EPT paging structure;
 +   @level == 1 means the last (4K PTE) level */
 +unsigned long get_ept_pte(unsigned long *pml4,
 +   unsigned