RE: [PATCH V2 10/13] x86/hyper-v: Add HvFlushGuestAddressList hypercall support

2018-09-19 Thread Michael Kelley (EOSG)
From: Tianyu Lan  Sent: Monday, September 17, 2018 8:19 PM
>
>  #include 
>  #include 
>  #include 
>  #include 
> +#include 

Hopefully asm/kvm_host.h does not need to be #included, given
the new code structure.

> 
>  #include 
> 
> +/*
> + *  MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
> + *  by the bitwidth of "additional_pages" in union hv_gpa_page_range.
> + */
> +#define MAX_FLUSH_PAGES (2048)
> +
> +/*
> + * All input flush parameters are in single page. The max flush count
> + * is equal with how many entries of union hv_gpa_page_range can be
> + * populated in the input parameter page. MAX_FLUSH_REP_COUNT
> + * = (4096 - 16) / 8. (“Page Size” - "Address Space" - "Flags") /
> + * "GPA Range".
> + */
> +#define MAX_FLUSH_REP_COUNT (510)
> +

I would recommend putting the above two definitions in
hyperv-tlfs.h.  They are directly tied to the data structures defined
by Hyper-V in the TLFS.  Put MAX_FLUSH_PAGES immediately after
the definition for hv_gpa_page_range so that the dependency is
obvious.

For MAX_FLUSH_REP_COUNT, can you do the calculation in
the #define rather than just in the comment?  Alternatively, define
the gpa_list[] array to be of MAX_FLUSH_REP_COUNT size, and then
add a compile time assert that the size of struct
hv_guest_mapping_flush_list is exactly one page in size.   It's just
a good way to use the compiler to help check for mistakes.

Also prefix them both with HV_ since they will be more
globally visible as part of hyperv-tlfs.h.

>  int hyperv_flush_guest_mapping(u64 as)
>  {
>   struct hv_guest_mapping_flush **flush_pcpu;
> @@ -54,3 +71,89 @@ int hyperv_flush_guest_mapping(u64 as)
>   return ret;
>  }
>  EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
> +
> +static int fill_flush_list(union hv_gpa_page_range gpa_list[],
> + int offset, u64 start_gfn, u64 pages)
> +{
> + int gpa_n = offset;
> + u64 cur = start_gfn;
> + u64 additional_pages;
> +
> + do {
> + if (gpa_n >= MAX_FLUSH_REP_COUNT) {
> + pr_warn("Request exceeds HvFlushGuestList max flush 
> count.");
> + return -ENOSPC;

I wonder if the warning is really needed.  When the error is returned up
through the higher levels of code, won't the higher levels just fall back to
the non-enlightened flush code?  So nothing actually goes wrong; it's just
that a slower code path gets taken.  A comment about such expectations
might be helpful.

> + }
> +
> + if (pages > MAX_FLUSH_PAGES) {
> + additional_pages = MAX_FLUSH_PAGES - 1;
> + pages -= MAX_FLUSH_PAGES;
> + } else {
> + additional_pages = pages - 1;
> + pages = 0;
> + }

The above code is really doing:

additional_pages = min(pages, MAX_FLUSH_PAGES) - 1;
pages -= additional_pages + 1;

And you might want to move the decrement of 'pages' down to the
bottom of the loop where you update the other loop variables.

> +
> + gpa_list[gpa_n].page.additional_pages = additional_pages;
> + gpa_list[gpa_n].page.largepage = false;
> + gpa_list[gpa_n].page.basepfn = cur;
> +
> + cur += additional_pages + 1;
> + gpa_n++;
> + } while (pages > 0);
> +
> + return gpa_n;
> +}
> +
> +int hyperv_flush_guest_mapping_range(u64 as, struct hyperv_tlb_range *range)
> +{
> + struct hv_guest_mapping_flush_list **flush_pcpu;
> + struct hv_guest_mapping_flush_list *flush;
> + u64 status = 0;
> + unsigned long flags;
> + int ret = -ENOTSUPP;
> + int gpa_n = 0;
> +
> + if (!hv_hypercall_pg)
> + goto fault;
> +
> + local_irq_save(flags);
> +
> + flush_pcpu = (struct hv_guest_mapping_flush_list **)
> + this_cpu_ptr(hyperv_pcpu_input_arg);
> +
> + flush = *flush_pcpu;
> + if (unlikely(!flush)) {
> + local_irq_restore(flags);
> + goto fault;
> + }
> +
> + flush->address_space = as;
> + flush->flags = 0;
> +
> + if (!range->flush_list)
> + gpa_n = fill_flush_list(flush->gpa_list, gpa_n,
> + range->start_gfn, range->pages);
> + else if (range->parse_flush_list_func)
> + gpa_n = range->parse_flush_list_func(flush->gpa_list, gpa_n,
> + range->flush_list, fill_flush_list);
> + else
> + gpa_n = -1;
> +
> + if (gpa_n < 0) {
> + local_irq_restore(flags);
> + goto fault;
> + }
> +
> + status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
> +  gpa_n, 0, flush, NULL);
> +
> + local_irq_restore(flags);
> +
> + if (!(status & HV_HYPERCALL_RESULT_MASK))
> + ret = 0;
> + else
> + ret = status;
> +fault:
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_r

[PATCH V2 10/13] x86/hyper-v: Add HvFlushGuestAddressList hypercall support

2018-09-17 Thread Tianyu Lan
Hyper-V provides HvFlushGuestAddressList() hypercall to flush EPT tlb
with specified ranges. This patch is to add the hypercall support.

Signed-off-by: Lan Tianyu 
---
Change since v1:
   Add a hyperv tlb flush struct to avoid using the kvm tlb flush struct
in the hyperv file.
---
 arch/x86/hyperv/nested.c   | 103 +
 arch/x86/include/asm/hyperv-tlfs.h |  17 ++
 arch/x86/include/asm/mshyperv.h|  16 ++
 3 files changed, 136 insertions(+)

diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index b8e60cc50461..40ddbfd54573 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -7,15 +7,32 @@
  *
  * Author : Lan Tianyu 
  */
+#define pr_fmt(fmt)  "Hyper-V: " fmt
 
 
 #include 
 #include 
 #include 
 #include 
+#include 
 
 #include 
 
+/*
+ *  MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ *  by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define MAX_FLUSH_PAGES (2048)
+
+/*
+ * All input flush parameters are in single page. The max flush count
+ * is equal with how many entries of union hv_gpa_page_range can be
+ * populated in the input parameter page. MAX_FLUSH_REP_COUNT
+ * = (4096 - 16) / 8. (“Page Size” - "Address Space" - "Flags") /
+ * "GPA Range".
+ */
+#define MAX_FLUSH_REP_COUNT (510)
+
 int hyperv_flush_guest_mapping(u64 as)
 {
struct hv_guest_mapping_flush **flush_pcpu;
@@ -54,3 +71,89 @@ int hyperv_flush_guest_mapping(u64 as)
return ret;
 }
 EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
+
+static int fill_flush_list(union hv_gpa_page_range gpa_list[],
+   int offset, u64 start_gfn, u64 pages)
+{
+   int gpa_n = offset;
+   u64 cur = start_gfn;
+   u64 additional_pages;
+
+   do {
+   if (gpa_n >= MAX_FLUSH_REP_COUNT) {
+   pr_warn("Request exceeds HvFlushGuestList max flush 
count.");
+   return -ENOSPC;
+   }
+
+   if (pages > MAX_FLUSH_PAGES) {
+   additional_pages = MAX_FLUSH_PAGES - 1;
+   pages -= MAX_FLUSH_PAGES;
+   } else {
+   additional_pages = pages - 1;
+   pages = 0;
+   }
+
+   gpa_list[gpa_n].page.additional_pages = additional_pages;
+   gpa_list[gpa_n].page.largepage = false;
+   gpa_list[gpa_n].page.basepfn = cur;
+
+   cur += additional_pages + 1;
+   gpa_n++;
+   } while (pages > 0);
+
+   return gpa_n;
+}
+
+int hyperv_flush_guest_mapping_range(u64 as, struct hyperv_tlb_range *range)
+{
+   struct hv_guest_mapping_flush_list **flush_pcpu;
+   struct hv_guest_mapping_flush_list *flush;
+   u64 status = 0;
+   unsigned long flags;
+   int ret = -ENOTSUPP;
+   int gpa_n = 0;
+
+   if (!hv_hypercall_pg)
+   goto fault;
+
+   local_irq_save(flags);
+
+   flush_pcpu = (struct hv_guest_mapping_flush_list **)
+   this_cpu_ptr(hyperv_pcpu_input_arg);
+
+   flush = *flush_pcpu;
+   if (unlikely(!flush)) {
+   local_irq_restore(flags);
+   goto fault;
+   }
+
+   flush->address_space = as;
+   flush->flags = 0;
+
+   if (!range->flush_list)
+   gpa_n = fill_flush_list(flush->gpa_list, gpa_n,
+   range->start_gfn, range->pages);
+   else if (range->parse_flush_list_func)
+   gpa_n = range->parse_flush_list_func(flush->gpa_list, gpa_n,
+   range->flush_list, fill_flush_list);
+   else
+   gpa_n = -1;
+
+   if (gpa_n < 0) {
+   local_irq_restore(flags);
+   goto fault;
+   }
+
+   status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
+gpa_n, 0, flush, NULL);
+
+   local_irq_restore(flags);
+
+   if (!(status & HV_HYPERCALL_RESULT_MASK))
+   ret = 0;
+   else
+   ret = status;
+fault:
+   return ret;
+}
+EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
diff --git a/arch/x86/include/asm/hyperv-tlfs.h 
b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..512f22b4 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -353,6 +353,7 @@ struct hv_tsc_emulation_status {
 #define HVCALL_POST_MESSAGE0x005c
 #define HVCALL_SIGNAL_EVENT0x005d
 #define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
 
 #define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE   0x0001
 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT12
@@ -750,6 +751,22 @@ struct hv_guest_mapping_flush {
u64 flags;
 };
 
+/* HvFlushGuestPhysicalAddressList hypercall */
+union hv_gpa_page_range {
+   u64 address_space;
+   struct {
+