The branch stable/13 has been updated by wulf:

URL: https://cgit.FreeBSD.org/src/commit/?id=847ffc18ddf8183fee8d959c75c62f317270ce88

commit 847ffc18ddf8183fee8d959c75c62f317270ce88
Author:     Vladimir Kondratyev <w...@freebsd.org>
AuthorDate: 2021-09-29 20:13:41 +0000
Commit:     Vladimir Kondratyev <w...@freebsd.org>
CommitDate: 2021-10-13 09:01:32 +0000

    LinuxKPI: Factor out vmf_insert_pfn_prot() routine
    
    from the GEM and TTM page fault handlers and move it into the base
    system. This code is too tightly integrated with LKPI mmap support
    to belong in drm-kmod.
    
    As this routine requires the associated vm_object to be locked, it
    has been given an additional _locked suffix; a usage sketch follows
    the diff below.
    
    Reviewed by:    hselasky, markj
    Differential revision:  https://reviews.freebsd.org/D32068
    
    (cherry picked from commit f6823dac8fa6e9fc2926a866e9a0c4d43e38e236)
---
 sys/compat/linuxkpi/common/include/linux/mm.h | 23 +++++++++++++++++++
 sys/compat/linuxkpi/common/src/linux_page.c   | 32 +++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

diff --git a/sys/compat/linuxkpi/common/include/linux/mm.h b/sys/compat/linuxkpi/common/include/linux/mm.h
index 74709299ba1a..dc75ae4483c3 100644
--- a/sys/compat/linuxkpi/common/include/linux/mm.h
+++ b/sys/compat/linuxkpi/common/include/linux/mm.h
@@ -82,6 +82,9 @@ CTASSERT((VM_PROT_ALL & -(1 << 8)) == 0);
 #define        VM_FAULT_RETRY          (1 << 9)
 #define        VM_FAULT_FALLBACK       (1 << 10)
 
+#define	VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+	VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
+
 #define        FAULT_FLAG_WRITE        (1 << 0)
 #define        FAULT_FLAG_MKWRITE      (1 << 1)
 #define        FAULT_FLAG_ALLOW_RETRY  (1 << 2)
@@ -183,6 +186,26 @@ io_remap_pfn_range(struct vm_area_struct *vma,
        return (0);
 }
 
+vm_fault_t
+lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
+    unsigned long pfn, pgprot_t prot);
+
+static inline vm_fault_t
+vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+    unsigned long pfn, pgprot_t prot)
+{
+       vm_fault_t ret;
+
+       VM_OBJECT_WLOCK(vma->vm_obj);
+       ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot);
+       VM_OBJECT_WUNLOCK(vma->vm_obj);
+
+       return (ret);
+}
+#define        vmf_insert_pfn_prot(...)        \
+       _Static_assert(false,           \
+"This function is always called in a loop. Consider using the locked version")
+
 static inline int
 apply_to_page_range(struct mm_struct *mm, unsigned long address,
     unsigned long size, pte_fn_t fn, void *data)
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index ee41366c53a6..cbe6d2530b91 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -275,3 +275,35 @@ is_vmalloc_addr(const void *addr)
 {
        return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
 }
+
+vm_fault_t
+lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
+    unsigned long pfn, pgprot_t prot)
+{
+       vm_object_t vm_obj = vma->vm_obj;
+       vm_page_t page;
+       vm_pindex_t pindex;
+
+       VM_OBJECT_ASSERT_WLOCKED(vm_obj);
+       pindex = OFF_TO_IDX(addr - vma->vm_start);
+       if (vma->vm_pfn_count == 0)
+               vma->vm_pfn_first = pindex;
+       MPASS(pindex <= OFF_TO_IDX(vma->vm_end));
+
+retry:
+       page = vm_page_grab(vm_obj, pindex, VM_ALLOC_NOCREAT);
+       if (page == NULL) {
+               page = PHYS_TO_VM_PAGE(IDX_TO_OFF(pfn));
+               if (!vm_page_busy_acquire(page, VM_ALLOC_WAITFAIL))
+                       goto retry;
+               if (vm_page_insert(page, vm_obj, pindex)) {
+                       vm_page_xunbusy(page);
+                       return (VM_FAULT_OOM);
+               }
+               vm_page_valid(page);
+       }
+       pmap_page_set_memattr(page, pgprot2cachemode(prot));
+       vma->vm_pfn_count++;
+
+       return (VM_FAULT_NOPAGE);
+}
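
For context, here is a minimal sketch of how a fault handler is expected
to consume the new routine. The handler name, base pfn, and page count
(example_gem_fault, example_base_pfn, example_npages) are hypothetical
and not part of this commit. The _Static_assert poison above exists
because such handlers insert a run of pages per fault, so the vm_object
lock should be taken once around the whole loop rather than once per
page:

static vm_fault_t
example_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr, pfn = example_base_pfn;	/* assumed driver state */
	vm_fault_t ret = VM_FAULT_NOPAGE;
	int i;

	/* Take the object lock once for the whole run of pages. */
	VM_OBJECT_WLOCK(vma->vm_obj);
	for (i = 0; i < example_npages; i++) {		/* assumed page count */
		addr = vma->vm_start + i * PAGE_SIZE;
		ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn + i,
		    vma->vm_page_prot);
		if (ret & VM_FAULT_ERROR)
			break;
	}
	VM_OBJECT_WUNLOCK(vma->vm_obj);

	return (ret);
}

Calling the plain vmf_insert_pfn_prot() wrapper in such a loop would
lock and unlock the object once per page; after this commit any such
call is rejected at compile time by the _Static_assert above.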
