Hi all,
On Thu, 30 Jul 2020 19:16:10 +1000 Stephen Rothwell
wrote:
>
> Today's linux-next merge of the hmm tree got a conflict in:
>
> arch/powerpc/kvm/book3s_hv_uvmem.c
>
> between commit:
>
> f1b87ea8784b ("KVM: PPC: Book3S HV: Move kvmppc_svm_page_out up")
>
> from the kvm-ppc tree and commit:
>
> 5143192cd410 ("mm/migrate: add a flags parameter to migrate_vma")
>
> from the hmm tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> diff --cc arch/powerpc/kvm/book3s_hv_uvmem.c
> index 0d49e3425a12,6850bd04bcb9..
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@@ -496,94 -253,14 +496,95 @@@ unsigned long kvmppc_h_svm_init_start(s
> return ret;
> }
>
> -unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
> +/*
> + * Provision a new page on HV side and copy over the contents
> + * from secure memory using UV_PAGE_OUT uvcall.
> + * Caller must held kvm->arch.uvmem_lock.
> + */
> +static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
> +unsigned long start,
> +unsigned long end, unsigned long page_shift,
> +struct kvm *kvm, unsigned long gpa)
> {
> -if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
> -return H_UNSUPPORTED;
> +unsigned long src_pfn, dst_pfn = 0;
> +struct migrate_vma mig;
> +struct page *dpage, *spage;
> +struct kvmppc_uvmem_page_pvt *pvt;
> +unsigned long pfn;
> +int ret = U_SUCCESS;
>
> -kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
> -pr_info("LPID %d went secure\n", kvm->arch.lpid);
> -return H_SUCCESS;
> +memset(&mig, 0, sizeof(mig));
> +mig.vma = vma;
> +mig.start = start;
> +mig.end = end;
> +mig.src = &src_pfn;
> +mig.dst = &dst_pfn;
> - mig.src_owner = &kvmppc_uvmem_pgmap;
> ++mig.pgmap_owner = &kvmppc_uvmem_pgmap;
> ++mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
> +
> +/* The requested page is already paged-out, nothing to do */
> +if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
> +return ret;
> +
> +ret = migrate_vma_setup(&mig);
> +if (ret)
> +return -1;
> +
> +spage = migrate_pfn_to_page(*mig.src);
> +if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
> +goto out_finalize;
> +
> +if (!is_zone_device_page(spage))
> +goto out_finalize;
> +
> +dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
> +if (!dpage) {
> +ret = -1;
> +goto out_finalize;
> +}
> +
> +lock_page(dpage);
> +pvt = spage->zone_device_data;
> +pfn = page_to_pfn(dpage);
> +
> +/*
> + * This function is used in two cases:
> + * - When HV touches a secure page, for which we do UV_PAGE_OUT
> + * - When a secure page is converted to shared page, we *get*
> + * the page to essentially unmap the device page. In this
> + * case we skip page-out.
> + */
> +if (!pvt->skip_page_out)
> +ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
> + gpa, 0, page_shift);
> +
> +if (ret == U_SUCCESS)
> +*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
> +else {
> +unlock_page(dpage);
> +__free_page(dpage);
> +goto out_finalize;
> +}
> +
> +migrate_vma_pages(&mig);
> +
> +out_finalize:
> +migrate_vma_finalize(&mig);
> +return ret;
> +}
> +
> +static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
> + unsigned long start, unsigned long end,
> + unsigned long page_shift,
> + struct kvm *kvm, unsigned long gpa)
> +{
> +int ret;
> +
> +mutex_lock(&kvm->arch.uvmem_lock);
> +ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
> +mutex_unlock(&kvm->arch.uvmem_lock);
> +
> +return ret;
> }
>
> /*
> @@@ -744,7 -400,20 +745,8 @@@ static int kvmppc_svm_page_in(struct vm
> mig.end = end;
> mig.src = _pfn;
> mig.dst = _pfn;
> + mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
>
> -/*
> - * We come here with mmap_lock write lock held just for
> - * ksm_madvise(), otherwise we only need read mmap_lock.
> - * Hence downgrade to read lock once ksm_madvise() is done.
> - */
> -ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
> - MADV_UNMERGEABLE, &vma->vm_flags);
> -mmap_write_downgrade(kvm->mm);
> -*downgrade = true;
> -if (ret)
> -