Re: [PATCH 4/4] KVM: PPC: Add hugepage support for IOMMU in-kernel handling

2013-06-15 Thread Benjamin Herrenschmidt
On Wed, 2013-06-05 at 16:11 +1000, Alexey Kardashevskiy wrote:

> @@ -185,7 +186,31 @@ static unsigned long kvmppc_realmode_gpa_to_hpa(struct kvm_vcpu *vcpu,
>   unsigned long hva, hpa, pg_size = 0, offset;
>   unsigned long gfn = gpa >> PAGE_SHIFT;
>   bool writing = gpa & TCE_PCI_WRITE;
> + struct kvmppc_iommu_hugepage *hp;
>  
> + /*
> +  * Try to find an already used hugepage.
> +  * If it is not there, the kvmppc_lookup_pte() will return zero
> +  * as it won't do get_page() on a huge page in real mode
> +  * and therefore the request will be passed to the virtual mode.
> +  */
> + if (tt) {
> + spin_lock(&tt->hugepages_lock);
> + list_for_each_entry(hp, &tt->hugepages, list) {
> + if ((gpa < hp->gpa) || (gpa >= hp->gpa + hp->size))
> + continue;
> +
> + /* Calculate host phys address keeping flags and offset in the page */
> + offset = gpa & (hp->size - 1);
> +
> + /* pte_pfn(pte) should return an address aligned to pg_size */
> + hpa = (pte_pfn(hp->pte) << PAGE_SHIFT) + offset;
> + spin_unlock(&tt->hugepages_lock);
> +
> + return hpa;
> + }
> + spin_unlock(&tt->hugepages_lock);
> + }

Wow... this is run in real mode, right?

spin_lock() and spin_unlock() are a big no-no in real mode. If lockdep
and/or spinlock debugging are enabled and something goes pear-shaped,
they are going to bring your whole system down in a blink, in quite
horrible ways.

If you are going to do that, you need some kind of custom low-level
lock.
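Something along these lines would do (an untested sketch, names made
up): a bare test-and-set lock never calls into lockdep or the spinlock
debug code, so it is at least safe to take in real mode.

/* Untested sketch: a raw lock that is real-mode safe because it
 * never touches lockdep or the spinlock debugging machinery. */
static inline void hp_list_lock(unsigned long *lock)
{
	while (test_and_set_bit_lock(0, lock))
		cpu_relax();		/* plain busy-wait, no tracing */
}

static inline void hp_list_unlock(unsigned long *lock)
{
	clear_bit_unlock(0, lock);
}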

Also, I see that you are basically using an unordered list and doing a
linear search in it every time. That's going to COST!

You should really consider a more efficient data structure. You should
also be able to do something that doesn't require locks for readers.
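For example (again just a sketch, all names invented here): if updates
published the entries into a sorted array that is replaced rather than
modified in place, the real-mode reader would need no lock at all, and
the linear scan becomes a binary search.

struct hp_entry {
	unsigned long gpa;	/* guest physical base of the huge page */
	unsigned long size;	/* huge page size, power of two */
	unsigned long hpa;	/* host physical base */
};

/* Lock-free lookup over a sorted, immutable array of entries */
static unsigned long hp_lookup(struct hp_entry *tbl, int n,
		unsigned long gpa)
{
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		struct hp_entry *e = &tbl[mid];

		if (gpa < e->gpa)
			hi = mid - 1;
		else if (gpa >= e->gpa + e->size)
			lo = mid + 1;
		else	/* hit: keep the offset within the huge page */
			return e->hpa + (gpa & (e->size - 1));
	}
	return ERROR_ADDR;	/* not found */
}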

>   /* Find a KVM memslot */
>   memslot = search_memslots(kvm_memslots(vcpu->kvm), gfn);
>   if (!memslot)
> @@ -237,6 +262,10 @@ static long kvmppc_clear_tce_real_mode(struct kvm_vcpu *vcpu,
>   if (oldtce & TCE_PCI_WRITE)
>   SetPageDirty(page);
>  
> + /* Do not put a huge page and continue without error */
> + if (PageCompound(page))
> + continue;
> +
>   if (realmode_put_page(page)) {
>   ret = H_TOO_HARD;
>   break;
> @@ -282,7 +311,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>   if (iommu_tce_put_param_check(tbl, ioba, tce))
>   return H_PARAMETER;
>  
> - hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tce, true);
> + hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt, tce, true);
>   if (hpa == ERROR_ADDR) {
>   vcpu->arch.tce_reason = H_TOO_HARD;
>   return H_TOO_HARD;
> @@ -295,6 +324,11 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
>   if (unlikely(ret)) {
>   struct page *pg = realmode_pfn_to_page(hpa);
>   BUG_ON(!pg);
> +
> + /* Do not put a huge page and return an error */
> + if (!PageCompound(pg))
> + return H_HARDWARE;
> +
>   if (realmode_put_page(pg)) {
>   vcpu->arch.tce_reason = H_HARDWARE;
>   return H_TOO_HARD;
> @@ -351,7 +385,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>   vcpu->arch.tce_tmp_num = 0;
>   vcpu->arch.tce_reason = 0;
>  
> - tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu,
> + tces = (unsigned long *) kvmppc_realmode_gpa_to_hpa(vcpu, NULL,
>   tce_list, false);
>   if ((unsigned long)tces == ERROR_ADDR)
>   return H_TOO_HARD;
> @@ -374,7 +408,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
>  
>   /* Translate TCEs and go get_page */
>   for (i = 0; i < npages; ++i) {
> - unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu,
> + unsigned long hpa = kvmppc_realmode_gpa_to_hpa(vcpu, tt,
>   vcpu->arch.tce_tmp[i], true);
>   if (hpa == ERROR_ADDR) {
>   vcpu->arch.tce_tmp_num = i;

Cheers,
Ben.




Re: [PATCH 3/4] KVM: PPC: Add support for IOMMU in-kernel handling

2013-06-15 Thread Benjamin Herrenschmidt
>  static pte_t kvmppc_lookup_pte(pgd_t *pgdir, unsigned long hva, bool writing,
> - unsigned long *pte_sizep)
> + unsigned long *pte_sizep, bool do_get_page)
>  {
>   pte_t *ptep;
>   unsigned int shift = 0;
> @@ -135,6 +136,14 @@ static pte_t kvmppc_lookup_pte(pgd_t *pgdir, unsigned long hva, bool writing,
>   if (!pte_present(*ptep))
>   return __pte(0);
>  
> + /*
> +  * Put huge pages handling to the virtual mode.
> +  * The only exception is for TCE list pages which we
> +  * do need to call get_page() for.
> +  */
> + if ((*pte_sizep > PAGE_SIZE) && do_get_page)
> + return __pte(0);
> +
>   /* wait until _PAGE_BUSY is clear then set it atomically */
>   __asm__ __volatile__ (
>   "1: ldarx   %0,0,%3\n"
> @@ -148,6 +157,18 @@ static pte_t kvmppc_lookup_pte(pgd_t *pgdir, unsigned long hva, bool writing,
>   : "cc");
>  
>   ret = pte;
> + if (do_get_page && pte_present(pte) && (!writing || pte_write(pte))) {
> + struct page *pg = NULL;
> + pg = realmode_pfn_to_page(pte_pfn(pte));
> + if (realmode_get_page(pg)) {
> + ret = __pte(0);
> + } else {
> + pte = pte_mkyoung(pte);
> + if (writing)
> + pte = pte_mkdirty(pte);
> + }
> + }
> + *ptep = pte;	/* clears _PAGE_BUSY */
>  
>   return ret;
>  }

So now you are adding the clearing of _PAGE_BUSY that was missing from
your first patch. Except that this is not enough: it means that in the
"emulated" case (ie, !do_get_page) you will in essence return, and then
use, a PTE that is not locked, with no synchronization to ensure that
the underlying page doesn't go away... and then you'll dereference that
page.

So either make everything use speculative get_page, or make the emulated
case use the MMU notifier to drop the operation in case of collision.

The former looks easier.

Also, any specific reason why you do:

  - Lock the PTE
  - get_page()
  - Unlock the PTE

Instead of

  - Read the PTE
  - get_page_unless_zero
  - re-check PTE

Like get_user_pages_fast() does?

The former will be two atomic ops, the latter only one (faster), but
maybe you have a good reason why that can't work...
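For reference, that sequence would look roughly like this (untested,
the logic is mine, and the put on the failure path assumes the page is
not compound, as per your other helpers):

	pte_t pte = ACCESS_ONCE(*ptep);		/* 1. read the PTE, no lock */
	struct page *pg;

	if (!pte_present(pte) || (writing && !pte_write(pte)))
		return __pte(0);

	pg = realmode_pfn_to_page(pte_pfn(pte));
	if (!pg || !get_page_unless_zero(pg))	/* 2. one speculative atomic */
		return __pte(0);

	if (pte_val(pte) != pte_val(ACCESS_ONCE(*ptep))) {
		realmode_put_page(pg);		/* 3. PTE changed, bail out */
		return __pte(0);
	}

	return pte;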

Cheers,
Ben.




Re: [PATCH 2/4] powerpc: Prepare to support kernel handling of IOMMU map/unmap

2013-06-15 Thread Benjamin Herrenschmidt
On Sun, 2013-06-16 at 14:26 +1000, Benjamin Herrenschmidt wrote:
> > +int realmode_get_page(struct page *page)
> > +{
> > + if (PageCompound(page))
> > + return -EAGAIN;
> > +
> > + get_page(page);
> > +
> > + return 0;
> > +}

Shouldn't it be get_page_unless_zero()?
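Ie. something like this (sketch):

int realmode_get_page(struct page *page)
{
	if (PageCompound(page))
		return -EAGAIN;

	/* Speculative: fail instead of resurrecting a page whose
	 * refcount has already dropped to zero on the free path. */
	if (!get_page_unless_zero(page))
		return -EAGAIN;

	return 0;
}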

Cheers,
Ben.



Re: [PATCH 2/4] powerpc: Prepare to support kernel handling of IOMMU map/unmap

2013-06-15 Thread Benjamin Herrenschmidt
> +#if defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_FLATMEM)
> +int realmode_get_page(struct page *page)
> +{
> + if (PageCompound(page))
> + return -EAGAIN;
> +
> + get_page(page);
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(realmode_get_page);
> +
> +int realmode_put_page(struct page *page)
> +{
> + if (PageCompound(page))
> + return -EAGAIN;
> +
> + if (!atomic_add_unless(&page->_count, -1, 1))
> + return -EAGAIN;
> +
> + return 0;
> +}
> +EXPORT_SYMBOL_GPL(realmode_put_page);
> +#endif

Several worries here, mostly that if the generic code ever changes
(something gets added to get_page() that makes it no longer safe for use
in real mode, for example, or some other condition gets added to
put_page()), we go out of sync and potentially end up with very hard and
very subtle bugs.

It might be worth making sure that:

 - This is reviewed by some generic VM people (and make sure they
understand why we need to do that)

 - A comment is added to get_page() and put_page() to make sure that if
they are changed in any way, people double-check the impact on our
realmode_get_page() (or "ping" us to make sure things are still ok).
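Even a bare comment above the generic helpers would go a long way,
something like (wording is only a suggestion):

/*
 * NOTE: arch/powerpc KVM mirrors a subset of this logic in
 * realmode_get_page()/realmode_put_page() for use in real mode.
 * If you change the refcounting rules here, please double-check
 * those helpers (or ping the powerpc folks) so they stay in sync.
 */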

Cheers,
Ben.




Re: [PATCH 1/4] KVM: PPC: Add support for multiple-TCE hcalls

2013-06-15 Thread Benjamin Herrenschmidt
On Wed, 2013-06-05 at 16:11 +1000, Alexey Kardashevskiy wrote:
> This adds real mode handlers for the H_PUT_TCE_INDIRECT and
> H_STUFF_TCE hypercalls for QEMU emulated devices such as IBMVIO
> devices or emulated PCI.  These calls allow adding multiple entries
> (up to 512) to the TCE table in one call, which saves time on
> transitions to/from real mode.
> 
> This adds a tce_tmp cache to kvm_vcpu_arch to save valid TCEs
> (copied from user space and verified) before writing the whole list into
> the TCE table. This cache will be utilized more in the upcoming
> VFIO/IOMMU support to continue TCE list processing in virtual
> mode in case the real mode handler fails for some reason.
> 
> This adds a guest-physical to host-real address converter
> and calls the existing H_PUT_TCE handler. The converting function
> is going to be fully utilized by the upcoming VFIO support patches.
> 
> This also implements the KVM_CAP_PPC_MULTITCE capability,
> so in order to support the functionality of this patch, QEMU
> needs to query for this capability and set the "hcall-multi-tce"
> hypertas property only if the capability is present; otherwise
> there will be serious performance degradation.
> 
> Cc: David Gibson 
> Signed-off-by: Alexey Kardashevskiy 
> Signed-off-by: Paul Mackerras 
> 
> ---
> Changelog:
> 2013/06/05:
> * fixed mistype about IBMVIO in the commit message
> * updated doc and moved it to another section
> * changed capability number
> 
> 2013/05/21:
> * added kvm_vcpu_arch::tce_tmp
> * removed cleanup if put_indirect failed; instead we do not even start
> writing to the TCE table if we cannot get TCEs from the user or they are
> invalid
> * kvmppc_emulated_h_put_tce is split to kvmppc_emulated_put_tce
> and kvmppc_emulated_validate_tce (for the previous item)
> * fixed bug with fallthrough for H_IPI
> * removed all get_user() from real mode handlers
> * kvmppc_lookup_pte() added (instead of making lookup_linux_pte public)
> ---
>  Documentation/virtual/kvm/api.txt   |   17 ++
>  arch/powerpc/include/asm/kvm_host.h |2 +
>  arch/powerpc/include/asm/kvm_ppc.h  |   16 +-
>  arch/powerpc/kvm/book3s_64_vio.c|  118 ++
>  arch/powerpc/kvm/book3s_64_vio_hv.c |  266 +++
>  arch/powerpc/kvm/book3s_hv.c|   39 +
>  arch/powerpc/kvm/book3s_hv_rmhandlers.S |6 +
>  arch/powerpc/kvm/book3s_pr_papr.c   |   37 -
>  arch/powerpc/kvm/powerpc.c  |3 +
>  include/uapi/linux/kvm.h|1 +
>  10 files changed, 473 insertions(+), 32 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 5f91eda..6c082ff 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -2362,6 +2362,23 @@ calls by the guest for that service will be passed to userspace to be
>  handled.
>  
> 
> +4.83 KVM_CAP_PPC_MULTITCE
> +
> +Capability: KVM_CAP_PPC_MULTITCE
> +Architectures: ppc
> +Type: vm
> +
> +This capability tells the guest that the kernel supports handling of multiple
> +TCE entry add/remove hypercalls. This significantly accelerates DMA
> +operations for PPC KVM guests.
> +
> +Unlike other capabilities in this section, this one does not have an ioctl.
> +Instead, when the capability is present, the H_PUT_TCE_INDIRECT and
> +H_STUFF_TCE hypercalls are to be handled in the host kernel and not passed to
> +user space. Otherwise it might be better for the guest to continue using the
> +H_PUT_TCE hypercall (if KVM_CAP_SPAPR_TCE or KVM_CAP_SPAPR_TCE_IOMMU are
> +present).
> +
> +
>  5. The kvm_run structure
>  
>  
> diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
> index af326cd..85d8f26 100644
> --- a/arch/powerpc/include/asm/kvm_host.h
> +++ b/arch/powerpc/include/asm/kvm_host.h
> @@ -609,6 +609,8 @@ struct kvm_vcpu_arch {
>   spinlock_t tbacct_lock;
>   u64 busy_stolen;
>   u64 busy_preempt;
> +
> + unsigned long *tce_tmp;	/* TCE cache for H_PUT_TCE_INDIRECT hcall */
>  #endif
>  };
>  
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index a5287fe..e852921b 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -133,8 +133,20 @@ extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
>  
>  extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
>   struct kvm_create_spapr_tce *args);
> -extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
> -  unsigned long ioba, unsigned long tce);
> +extern struct kvmppc_spapr_tce_table *kvmppc_find_tce_table(
> + struct kvm_vcpu *vcpu, unsigned long liobn);
> +extern long kvmppc_emulated_validate_tce(unsigned long tce);
> +extern void kvmppc_emulated_put_tce(struct kvmppc_spapr_tce_table *tt,
> + unsigned long ioba,