Hi Alistair,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on kselftest/next]
[also build test ERROR on linus/master v5.11 next-20210218]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
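
For example, the base commit can be recorded when generating the patch (a sketch
using standard git-format-patch options; '--base=auto' requires the branch to
have an upstream configured, and <base-commit> is a placeholder):

        # record an explicit base commit for the most recent patch
        git format-patch --base=<base-commit> -1
        # or derive the base from the branch's configured upstream
        git format-patch --base=auto -1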

url:    https://github.com/0day-ci/linux/commits/Alistair-Popple/Add-support-for-SVM-atomics-in-Nouveau/20210219-100858
base:   https://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git next
config: x86_64-randconfig-s021-20210217 (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.3-215-g0fb77bb6-dirty
        # https://github.com/0day-ci/linux/commit/bb5444811772d30b2e3bbaa44baeb8a4b3f03cec
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Alistair-Popple/Add-support-for-SVM-atomics-in-Nouveau/20210219-100858
        git checkout bb5444811772d30b2e3bbaa44baeb8a4b3f03cec
        # save the attached .config to linux build tree
        make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <l...@intel.com>

All errors (new ones prefixed by >>):

   ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/cpu/vmware.o' being placed in section `.data..decrypted'
   ld: warning: orphan section `.data..decrypted' from `arch/x86/kernel/kvm.o' being placed in section `.data..decrypted'
   ld: mm/memory.o: in function `do_swap_page':
>> mm/memory.c:3300: undefined reference to `hmm_remove_exclusive_entry'
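
The undefined reference suggests that the new call in do_swap_page() is compiled
unconditionally, while the definition of hmm_remove_exclusive_entry() is only
built under a configuration option this randconfig does not enable. One common
way to resolve such a link failure is a static inline fallback next to the
declaration. The following is only a sketch: the header location
(include/linux/hmm.h) and the Kconfig symbol (CONFIG_DEVICE_PRIVATE) are
assumptions and should be adjusted to whatever the series actually uses.

        /* include/linux/hmm.h -- hypothetical location of the declaration */
        #ifdef CONFIG_DEVICE_PRIVATE
        vm_fault_t hmm_remove_exclusive_entry(struct vm_fault *vmf);
        #else
        /*
         * Fallback when the implementation is not built: device-exclusive
         * entries cannot exist in this configuration.
         */
        static inline vm_fault_t hmm_remove_exclusive_entry(struct vm_fault *vmf)
        {
                return VM_FAULT_SIGBUS;
        }
        #endif

Alternatively, the new branch in do_swap_page() could be compiled out when the
implementation is unavailable.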


vim +3300 mm/memory.c

  3270  
  3271  /*
  3272   * We enter with non-exclusive mmap_lock (to exclude vma changes,
  3273   * but allow concurrent faults), and pte mapped but not yet locked.
  3274   * We return with pte unmapped and unlocked.
  3275   *
  3276   * We return with the mmap_lock locked or unlocked in the same cases
  3277   * as does filemap_fault().
  3278   */
  3279  vm_fault_t do_swap_page(struct vm_fault *vmf)
  3280  {
  3281          struct vm_area_struct *vma = vmf->vma;
  3282          struct page *page = NULL, *swapcache;
  3283          swp_entry_t entry;
  3284          pte_t pte;
  3285          int locked;
  3286          int exclusive = 0;
  3287          vm_fault_t ret = 0;
  3288          void *shadow = NULL;
  3289  
  3290          if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
  3291                  goto out;
  3292  
  3293          entry = pte_to_swp_entry(vmf->orig_pte);
  3294          if (unlikely(non_swap_entry(entry))) {
  3295                  if (is_migration_entry(entry)) {
  3296                          migration_entry_wait(vma->vm_mm, vmf->pmd,
  3297                                               vmf->address);
  3298                  } else if (is_device_exclusive_entry(entry)) {
  3299                          vmf->page = device_exclusive_entry_to_page(entry);
> 3300                          ret = hmm_remove_exclusive_entry(vmf);
  3301                  } else if (is_device_private_entry(entry)) {
  3302                          vmf->page = device_private_entry_to_page(entry);
  3303                          ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
  3304                  } else if (is_hwpoison_entry(entry)) {
  3305                          ret = VM_FAULT_HWPOISON;
  3306                  } else {
  3307                          print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
  3308                          ret = VM_FAULT_SIGBUS;
  3309                  }
  3310                  goto out;
  3311          }
  3312  
  3313  
  3314          delayacct_set_flag(DELAYACCT_PF_SWAPIN);
  3315          page = lookup_swap_cache(entry, vma, vmf->address);
  3316          swapcache = page;
  3317  
  3318          if (!page) {
  3319                  struct swap_info_struct *si = swp_swap_info(entry);
  3320  
  3321                  if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
  3322                      __swap_count(entry) == 1) {
  3323                          /* skip swapcache */
  3324                          page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
  3325                                                          vmf->address);
  3326                          if (page) {
  3327                                  int err;
  3328  
  3329                                  __SetPageLocked(page);
  3330                                  __SetPageSwapBacked(page);
  3331                                  set_page_private(page, entry.val);
  3332  
  3333                                  /* Tell memcg to use swap ownership records */
  3334                                  SetPageSwapCache(page);
  3335                                  err = mem_cgroup_charge(page, vma->vm_mm,
  3336                                                          GFP_KERNEL);
  3337                                  ClearPageSwapCache(page);
  3338                                  if (err) {
  3339                                          ret = VM_FAULT_OOM;
  3340                                          goto out_page;
  3341                                  }
  3342  
  3343                                  shadow = get_shadow_from_swap_cache(entry);
  3344                                  if (shadow)
  3345                                          workingset_refault(page, shadow);
  3346  
  3347                                  lru_cache_add(page);
  3348                                  swap_readpage(page, true);
  3349                          }
  3350                  } else {
  3351                          page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
  3352                                                  vmf);
  3353                          swapcache = page;
  3354                  }
  3355  
  3356                  if (!page) {
  3357                          /*
  3358                           * Back out if somebody else faulted in this pte
  3359                           * while we released the pte lock.
  3360                           */
  3361                          vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
  3362                                          vmf->address, &vmf->ptl);
  3363                          if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
  3364                                  ret = VM_FAULT_OOM;
  3365                          delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  3366                          goto unlock;
  3367                  }
  3368  
  3369                  /* Had to read the page from swap area: Major fault */
  3370                  ret = VM_FAULT_MAJOR;
  3371                  count_vm_event(PGMAJFAULT);
  3372                  count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  3373          } else if (PageHWPoison(page)) {
  3374                  /*
  3375                   * hwpoisoned dirty swapcache pages are kept for killing
  3376                   * owner processes (which may be unknown at hwpoison time)
  3377                   */
  3378                  ret = VM_FAULT_HWPOISON;
  3379                  delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  3380                  goto out_release;
  3381          }
  3382  
  3383          locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
  3384  
  3385          delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
  3386          if (!locked) {
  3387                  ret |= VM_FAULT_RETRY;
  3388                  goto out_release;
  3389          }
  3390  
  3391          /*
  3392           * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
  3393           * release the swapcache from under us.  The page pin, and pte_same
  3394           * test below, are not enough to exclude that.  Even if it is still
  3395           * swapcache, we need to check that the page's swap has not changed.
  3396           */
  3397          if (unlikely((!PageSwapCache(page) ||
  3398                          page_private(page) != entry.val)) && swapcache)
  3399                  goto out_page;
  3400  
  3401          page = ksm_might_need_to_copy(page, vma, vmf->address);
  3402          if (unlikely(!page)) {
  3403                  ret = VM_FAULT_OOM;
  3404                  page = swapcache;
  3405                  goto out_page;
  3406          }
  3407  
  3408          cgroup_throttle_swaprate(page, GFP_KERNEL);
  3409  
  3410          /*
  3411           * Back out if somebody else already faulted in this pte.
  3412           */
  3413          vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
  3414                          &vmf->ptl);
  3415          if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
  3416                  goto out_nomap;
  3417  
  3418          if (unlikely(!PageUptodate(page))) {
  3419                  ret = VM_FAULT_SIGBUS;
  3420                  goto out_nomap;
  3421          }
  3422  
  3423          /*
  3424           * The page isn't present yet, go ahead with the fault.
  3425           *
  3426           * Be careful about the sequence of operations here.
  3427           * To get its accounting right, reuse_swap_page() must be called
  3428           * while the page is counted on swap but not yet in mapcount i.e.
  3429           * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
  3430           * must be called after the swap_free(), or it will never succeed.
  3431           */
  3432  
  3433          inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
  3434          dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
  3435          pte = mk_pte(page, vma->vm_page_prot);
  3436          if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
  3437                  pte = maybe_mkwrite(pte_mkdirty(pte), vma);
  3438                  vmf->flags &= ~FAULT_FLAG_WRITE;
  3439                  ret |= VM_FAULT_WRITE;
  3440                  exclusive = RMAP_EXCLUSIVE;
  3441          }
  3442          flush_icache_page(vma, page);
  3443          if (pte_swp_soft_dirty(vmf->orig_pte))
  3444                  pte = pte_mksoft_dirty(pte);
  3445          if (pte_swp_uffd_wp(vmf->orig_pte)) {
  3446                  pte = pte_mkuffd_wp(pte);
  3447                  pte = pte_wrprotect(pte);
  3448          }
  3449          set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
  3450          arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
  3451          vmf->orig_pte = pte;
  3452  
  3453          /* ksm created a completely new copy */
  3454          if (unlikely(page != swapcache && swapcache)) {
  3455                  page_add_new_anon_rmap(page, vma, vmf->address, false);
  3456                  lru_cache_add_inactive_or_unevictable(page, vma);
  3457          } else {
  3458                  do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
  3459          }
  3460  
  3461          swap_free(entry);
  3462          if (mem_cgroup_swap_full(page) ||
  3463              (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
  3464                  try_to_free_swap(page);
  3465          unlock_page(page);
  3466          if (page != swapcache && swapcache) {
  3467                  /*
  3468                   * Hold the lock to avoid the swap entry to be reused
  3469                   * until we take the PT lock for the pte_same() check
  3470                   * (to avoid false positives from pte_same). For
  3471                   * further safety release the lock after the swap_free
  3472                   * so that the swap count won't change under a
  3473                   * parallel locked swapcache.
  3474                   */
  3475                  unlock_page(swapcache);
  3476                  put_page(swapcache);
  3477          }
  3478  
  3479          if (vmf->flags & FAULT_FLAG_WRITE) {
  3480                  ret |= do_wp_page(vmf);
  3481                  if (ret & VM_FAULT_ERROR)
  3482                          ret &= VM_FAULT_ERROR;
  3483                  goto out;
  3484          }
  3485  
  3486          /* No need to invalidate - it was non-present before */
  3487          update_mmu_cache(vma, vmf->address, vmf->pte);
  3488  unlock:
  3489          pte_unmap_unlock(vmf->pte, vmf->ptl);
  3490  out:
  3491          return ret;
  3492  out_nomap:
  3493          pte_unmap_unlock(vmf->pte, vmf->ptl);
  3494  out_page:
  3495          unlock_page(page);
  3496  out_release:
  3497          put_page(page);
  3498          if (page != swapcache && swapcache) {
  3499                  unlock_page(swapcache);
  3500                  put_page(swapcache);
  3501          }
  3502          return ret;
  3503  }
  3504  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org

