Hi,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[cannot apply to v5.3-rc3 next-20190807]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/john-hubbard-gmail-com/powerpc-convert-put_page-to-put_user_page/20190805-132131
config: powerpc-allmodconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 7.4.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.4.0 make.cross ARCH=powerpc 

If you fix the issue, kindly add the following tag
Reported-by: kbuild test robot <l...@intel.com>

All errors (new ones prefixed by >>):

   arch/powerpc/kvm/book3s_64_mmu_radix.c: In function 'kvmppc_book3s_instantiate_page':
>> arch/powerpc/kvm/book3s_64_mmu_radix.c:879:4: error: too many arguments to function 'put_user_pages_dirty_lock'
       put_user_pages_dirty_lock(&page, 1, dirty);
       ^~~~~~~~~~~~~~~~~~~~~~~~~
   In file included from arch/powerpc/include/asm/io.h:29:0,
                    from include/linux/io.h:13,
                    from include/linux/irq.h:20,
                    from arch/powerpc/include/asm/hardirq.h:6,
                    from include/linux/hardirq.h:9,
                    from include/linux/kvm_host.h:7,
                    from arch/powerpc/kvm/book3s_64_mmu_radix.c:10:
   include/linux/mm.h:1061:6: note: declared here
    void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
         ^~~~~~~~~~~~~~~~~~~~~~~~~
--
   arch/powerpc/mm/book3s64/iommu_api.c: In function 'mm_iommu_unpin':
>> arch/powerpc/mm/book3s64/iommu_api.c:220:3: error: too many arguments to function 'put_user_pages_dirty_lock'
      put_user_pages_dirty_lock(&page, 1, dirty);
      ^~~~~~~~~~~~~~~~~~~~~~~~~
   In file included from include/linux/migrate.h:5:0,
                    from arch/powerpc/mm/book3s64/iommu_api.c:13:
   include/linux/mm.h:1061:6: note: declared here
    void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
         ^~~~~~~~~~~~~~~~~~~~~~~~~

vim +/put_user_pages_dirty_lock +879 arch/powerpc/kvm/book3s_64_mmu_radix.c

   765  
   766  int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
   767                                     unsigned long gpa,
   768                                     struct kvm_memory_slot *memslot,
   769                                     bool writing, bool kvm_ro,
   770                                     pte_t *inserted_pte, unsigned int *levelp)
   771  {
   772          struct kvm *kvm = vcpu->kvm;
   773          struct page *page = NULL;
   774          unsigned long mmu_seq;
   775          unsigned long hva, gfn = gpa >> PAGE_SHIFT;
   776          bool upgrade_write = false;
   777          bool *upgrade_p = &upgrade_write;
   778          pte_t pte, *ptep;
   779          unsigned int shift, level;
   780          int ret;
   781          bool large_enable;
   782  
   783          /* used to check for invalidations in progress */
   784          mmu_seq = kvm->mmu_notifier_seq;
   785          smp_rmb();
   786  
   787          /*
   788           * Do a fast check first, since __gfn_to_pfn_memslot doesn't
   789           * do it with !atomic && !async, which is how we call it.
   790           * We always ask for write permission since the common case
   791           * is that the page is writable.
   792           */
   793          hva = gfn_to_hva_memslot(memslot, gfn);
   794          if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
   795                  upgrade_write = true;
   796          } else {
   797                  unsigned long pfn;
   798  
   799                  /* Call KVM generic code to do the slow-path check */
   800                  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
   801                                             writing, upgrade_p);
   802                  if (is_error_noslot_pfn(pfn))
   803                          return -EFAULT;
   804                  page = NULL;
   805                  if (pfn_valid(pfn)) {
   806                          page = pfn_to_page(pfn);
   807                          if (PageReserved(page))
   808                                  page = NULL;
   809                  }
   810          }
   811  
   812          /*
   813           * Read the PTE from the process' radix tree and use that
   814           * so we get the shift and attribute bits.
   815           */
   816          local_irq_disable();
   817          ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
   818          /*
   819           * If the PTE disappeared temporarily due to a THP
   820           * collapse, just return and let the guest try again.
   821           */
   822          if (!ptep) {
   823                  local_irq_enable();
   824                  if (page) {
   825                          if (upgrade_write)
   826                                  put_user_page(page);
   827                          else
   828                                  put_page(page);
   829                  }
   830                  return RESUME_GUEST;
   831          }
   832          pte = *ptep;
   833          local_irq_enable();
   834  
   835          /* If we're logging dirty pages, always map single pages */
   836          large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
   837  
   838          /* Get pte level from shift/size */
   839          if (large_enable && shift == PUD_SHIFT &&
   840              (gpa & (PUD_SIZE - PAGE_SIZE)) ==
   841              (hva & (PUD_SIZE - PAGE_SIZE))) {
   842                  level = 2;
   843          } else if (large_enable && shift == PMD_SHIFT &&
   844                     (gpa & (PMD_SIZE - PAGE_SIZE)) ==
   845                     (hva & (PMD_SIZE - PAGE_SIZE))) {
   846                  level = 1;
   847          } else {
   848                  level = 0;
   849                  if (shift > PAGE_SHIFT) {
   850                          /*
   851                           * If the pte maps more than one page, bring over
   852                           * bits from the virtual address to get the real
   853                           * address of the specific single page we want.
   854                           */
   855                          unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
   856                          pte = __pte(pte_val(pte) | (hva & rpnmask));
   857                  }
   858          }
   859  
   860          pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
   861          if (writing || upgrade_write) {
   862                  if (pte_val(pte) & _PAGE_WRITE)
   863                          pte = __pte(pte_val(pte) | _PAGE_DIRTY);
   864          } else {
   865                  pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
   866          }
   867  
   868          /* Allocate space in the tree and write the PTE */
   869          ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
   870                                  mmu_seq, kvm->arch.lpid, NULL, NULL);
   871          if (inserted_pte)
   872                  *inserted_pte = pte;
   873          if (levelp)
   874                  *levelp = level;
   875  
   876          if (page) {
   877                  bool dirty = !ret && (pte_val(pte) & _PAGE_WRITE);
   878                  if (upgrade_write)
 > 879                          put_user_pages_dirty_lock(&page, 1, dirty);
   880                  else {
   881                          if (dirty)
   882                                  set_page_dirty_lock(page);
   883                          put_page(page);
   884                  }
   885          }
   886  
   887          /* Increment number of large pages if we (successfully) inserted one */
   888          if (!ret) {
   889                  if (level == 1)
   890                          kvm->stat.num_2M_pages++;
   891                  else if (level == 2)
   892                          kvm->stat.num_1G_pages++;
   893          }
   894  
   895          return ret;
   896  }
   897  

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

Reply via email to