Put the return type on the same line as the function name for kvmppc_svm_page_in(), kvmppc_share_page(), kvmppc_h_svm_page_in() and kvmppc_svm_page_out(). Without this fix, git gets confused and generates incorrect function context in the hunk headers for code changes in this file. Weird, but true.
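For example, every hunk header in the diff below names kvmppc_uvmem_get_page() as the enclosing function, even though the hunks actually touch kvmppc_svm_page_in(), kvmppc_share_page(), kvmppc_h_svm_page_in() and kvmppc_svm_page_out(). As far as I can tell, git's hunk-header heuristic for C only treats a line as a function definition when the return type and the function name sit on the same line; otherwise it falls back to the last line it did recognize. A minimal sketch (foo() and bar() are made-up names, not code from this file):

    /*
     * Illustrative only. With the return type on its own line, neither
     * line looks like a definition to git, so hunks inside foo() get
     * attributed to some earlier function in their "@@ ... @@" context.
     */
    static int
    foo(int x)
    {
            return x + 1;
    }

    /* With type and name on one line, git picks up bar() correctly. */
    static int bar(int x)
    {
            return x + 2;
    }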
Cc: Paul Mackerras <pau...@ozlabs.org>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Bharata B Rao <bhar...@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
Cc: Sukadev Bhattiprolu <suka...@linux.vnet.ibm.com>
Cc: Laurent Dufour <lduf...@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauer...@linux.ibm.com>
Cc: David Gibson <da...@gibson.dropbear.id.au>
Cc: Claudio Carvalho <cclau...@linux.ibm.com>
Cc: kvm-...@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Ram Pai <linux...@us.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_uvmem.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 79b1202..ea4a1f1 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -368,8 +368,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  * Alloc a PFN from private device memory pool and copy page from normal
  * memory to secure memory using UV_PAGE_IN uvcall.
  */
-static int
-kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
+static int kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
 		   unsigned long end, unsigned long gpa, struct kvm *kvm,
 		   unsigned long page_shift, bool *downgrade)
 {
@@ -436,8 +435,8 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  * In the former case, uses dev_pagemap_ops.migrate_to_ram handler
  * to unmap the device page from QEMU's page tables.
  */
-static unsigned long
-kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
+static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
+		unsigned long page_shift)
 {
 
 	int ret = H_PARAMETER;
@@ -486,9 +485,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  * H_PAGE_IN_SHARED flag makes the page shared which means that the same
  * memory in is visible from both UV and HV.
  */
-unsigned long
-kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
-		     unsigned long flags, unsigned long page_shift)
+unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
+		unsigned long flags,
+		unsigned long page_shift)
 {
 	bool downgrade = false;
 	unsigned long start, end;
@@ -545,10 +544,10 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
  * Provision a new page on HV side and copy over the contents
  * from secure memory using UV_PAGE_OUT uvcall.
  */
-static int
-kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
-		    unsigned long end, unsigned long page_shift,
-		    struct kvm *kvm, unsigned long gpa)
+static int kvmppc_svm_page_out(struct vm_area_struct *vma,
+		unsigned long start,
+		unsigned long end, unsigned long page_shift,
+		struct kvm *kvm, unsigned long gpa)
 {
 	unsigned long src_pfn, dst_pfn = 0;
 	struct migrate_vma mig;
-- 
1.8.3.1