On Mon, Feb 04, 2019 at 10:15:52AM -0800, Alexander Duyck wrote:
> From: Alexander Duyck <alexander.h.du...@linux.intel.com>
> 
> Add guest support for providing free memory hints to the KVM hypervisor for
> freed pages of huge TLB size or larger. I am restricting the size to
> huge TLB order and larger because the hypercalls are too expensive to be
> performing one per 4K page.

Even 2M pages start to get expensive with a TB guest.

Really it seems we want a virtio ring so we can pass a batch of these.
E.g. 256 entries, 2M each - that's more like it.

> Using the huge TLB order became the obvious
> choice for the order to use as it allows us to avoid fragmentation of higher
> order memory on the host.
> 
> I have limited the functionality so that it doesn't work when page
> poisoning is enabled. I did this because a write to the page after doing an
> MADV_DONTNEED would effectively negate the hint, so it would be wasting
> cycles to do so.

Again that's leaking host implementation detail into guest interface.

Having the guest give page hints to the host makes sense; however,
weird interactions with other features that arise from host
implementation details should be handled by the host, not encoded
into the guest interface.




> Signed-off-by: Alexander Duyck <alexander.h.du...@linux.intel.com>
> ---
>  arch/x86/include/asm/page.h |   13 +++++++++++++
>  arch/x86/kernel/kvm.c       |   23 +++++++++++++++++++++++
>  2 files changed, 36 insertions(+)
> 
> diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
> index 7555b48803a8..4487ad7a3385 100644
> --- a/arch/x86/include/asm/page.h
> +++ b/arch/x86/include/asm/page.h
> @@ -18,6 +18,19 @@
>  
>  struct page;
>  
> +#ifdef CONFIG_KVM_GUEST
> +#include <linux/jump_label.h>
> +extern struct static_key_false pv_free_page_hint_enabled;
> +
> +#define HAVE_ARCH_FREE_PAGE
> +void __arch_free_page(struct page *page, unsigned int order);
> +static inline void arch_free_page(struct page *page, unsigned int order)
> +{
> +     if (static_branch_unlikely(&pv_free_page_hint_enabled))
> +             __arch_free_page(page, order);
> +}
> +#endif
> +
>  #include <linux/range.h>
>  extern struct range pfn_mapped[];
>  extern int nr_pfn_mapped;
> diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
> index 5c93a65ee1e5..09c91641c36c 100644
> --- a/arch/x86/kernel/kvm.c
> +++ b/arch/x86/kernel/kvm.c
> @@ -48,6 +48,7 @@
>  #include <asm/tlb.h>
>  
>  static int kvmapf = 1;
> +DEFINE_STATIC_KEY_FALSE(pv_free_page_hint_enabled);
>  
>  static int __init parse_no_kvmapf(char *arg)
>  {
> @@ -648,6 +649,15 @@ static void __init kvm_guest_init(void)
>       if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
>               apic_set_eoi_write(kvm_guest_apic_eoi_write);
>  
> +     /*
> +      * The free page hinting doesn't add much value if page poisoning
> +      * is enabled. So we only enable the feature if page poisoning is
> +      * not present.
> +      */
> +     if (!page_poisoning_enabled() &&
> +         kvm_para_has_feature(KVM_FEATURE_PV_UNUSED_PAGE_HINT))
> +             static_branch_enable(&pv_free_page_hint_enabled);
> +
>  #ifdef CONFIG_SMP
>       smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
>       smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
> @@ -762,6 +772,19 @@ static __init int kvm_setup_pv_tlb_flush(void)
>  }
>  arch_initcall(kvm_setup_pv_tlb_flush);
>  
> +void __arch_free_page(struct page *page, unsigned int order)
> +{
> +     /*
> +      * Limit hints to blocks no smaller than pageblock in
> +      * size to limit the cost for the hypercalls.
> +      */
> +     if (order < KVM_PV_UNUSED_PAGE_HINT_MIN_ORDER)
> +             return;
> +
> +     kvm_hypercall2(KVM_HC_UNUSED_PAGE_HINT, page_to_phys(page),
> +                    PAGE_SIZE << order);
> +}
> +
>  #ifdef CONFIG_PARAVIRT_SPINLOCKS
>  
>  /* Kick a cpu by its apicid. Used to wake up a halted vcpu */

Reply via email to