The branch main has been updated by wulf:

URL: https://cgit.FreeBSD.org/src/commit/?id=20ce906157dfc9ae6ee834f56f2bba154f37d653

commit 20ce906157dfc9ae6ee834f56f2bba154f37d653
Author:     Vladimir Kondratyev <[email protected]>
AuthorDate: 2025-12-17 21:31:11 +0000
Commit:     Vladimir Kondratyev <[email protected]>
CommitDate: 2025-12-17 21:31:11 +0000

    LinuxKPI: Move vmap-related code from linux_compat.c to linux_page.c
    
    The upcoming vmap_pfn() implementation requires is_vmalloc_addr() to
    be in the same file as the other vmap-related code.  Move the code
    out of the overcrowded file.  No functional changes intended.
    
    MFC after:      1 week
    Reviewed by:    bz
    Differential Revision:  https://reviews.freebsd.org/D54223
---
 sys/compat/linuxkpi/common/src/linux_compat.c | 111 -----------------------
 sys/compat/linuxkpi/common/src/linux_page.c   | 125 ++++++++++++++++++++++++++
 2 files changed, 125 insertions(+), 111 deletions(-)
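
For context, this is how a LinuxKPI consumer typically exercises the
moved vmap()/vunmap() pair.  A minimal, hypothetical sketch (not from
the commit), assuming the usual Linux-style alloc_page()/__free_page()
helpers and the VM_MAP/PAGE_KERNEL constants that LinuxKPI provides;
error handling is elided:

/*
 * Hypothetical example, not part of the commit: map four physically
 * discontiguous pages into one contiguous KVA range, then tear it down.
 */
static int
vmap_usage_sketch(void)
{
        struct page *pages[4];
        void *va;
        int i;

        for (i = 0; i < 4; i++)
                pages[i] = alloc_page(GFP_KERNEL);

        /* vmap() allocates KVA and records its size in the vmmap hash. */
        va = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
        if (va == NULL)
                return (-ENOMEM);

        memset(va, 0, 4 * PAGE_SIZE);

        /* vunmap() recovers the size from the hash before freeing KVA. */
        vunmap(va);
        for (i = 0; i < 4; i++)
                __free_page(pages[i]);
        return (0);
}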

diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c
index ff0f477ea8cc..35430daf311d 100644
--- a/sys/compat/linuxkpi/common/src/linux_compat.c
+++ b/sys/compat/linuxkpi/common/src/linux_compat.c
@@ -1812,113 +1812,6 @@ const struct fileops linuxfileops = {
        .fo_flags = DFLAG_PASSABLE,
 };
 
-/*
- * Hash of vmmap addresses.  This is infrequently accessed and does not
- * need to be particularly large.  This is done because we must store the
- * caller's idea of the map size to properly unmap.
- */
-struct vmmap {
-       LIST_ENTRY(vmmap)       vm_next;
-       void                    *vm_addr;
-       unsigned long           vm_size;
-};
-
-struct vmmaphd {
-       struct vmmap *lh_first;
-};
-#define        VMMAP_HASH_SIZE 64
-#define        VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
-#define        VM_HASH(addr)   ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
-static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
-static struct mtx vmmaplock;
-
-static void
-vmmap_add(void *addr, unsigned long size)
-{
-       struct vmmap *vmmap;
-
-       vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
-       mtx_lock(&vmmaplock);
-       vmmap->vm_size = size;
-       vmmap->vm_addr = addr;
-       LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
-       mtx_unlock(&vmmaplock);
-}
-
-static struct vmmap *
-vmmap_remove(void *addr)
-{
-       struct vmmap *vmmap;
-
-       mtx_lock(&vmmaplock);
-       LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
-               if (vmmap->vm_addr == addr)
-                       break;
-       if (vmmap)
-               LIST_REMOVE(vmmap, vm_next);
-       mtx_unlock(&vmmaplock);
-
-       return (vmmap);
-}
-
-#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
-void *
-_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
-{
-       void *addr;
-
-       addr = pmap_mapdev_attr(phys_addr, size, attr);
-       if (addr == NULL)
-               return (NULL);
-       vmmap_add(addr, size);
-
-       return (addr);
-}
-#endif
-
-void
-iounmap(void *addr)
-{
-       struct vmmap *vmmap;
-
-       vmmap = vmmap_remove(addr);
-       if (vmmap == NULL)
-               return;
-#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
-       pmap_unmapdev(addr, vmmap->vm_size);
-#endif
-       kfree(vmmap);
-}
-
-void *
-vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
-{
-       vm_offset_t off;
-       size_t size;
-
-       size = count * PAGE_SIZE;
-       off = kva_alloc(size);
-       if (off == 0)
-               return (NULL);
-       vmmap_add((void *)off, size);
-       pmap_qenter(off, pages, count);
-
-       return ((void *)off);
-}
-
-void
-vunmap(void *addr)
-{
-       struct vmmap *vmmap;
-
-       vmmap = vmmap_remove(addr);
-       if (vmmap == NULL)
-               return;
-       pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
-       kva_free((vm_offset_t)addr, vmmap->vm_size);
-       kfree(vmmap);
-}
-
 static char *
 devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap)
 {
@@ -2887,9 +2780,6 @@ linux_compat_init(void *arg)
        INIT_LIST_HEAD(&pci_drivers);
        INIT_LIST_HEAD(&pci_devices);
        spin_lock_init(&pci_lock);
-       mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
-       for (i = 0; i < VMMAP_HASH_SIZE; i++)
-               LIST_INIT(&vmmaphead[i]);
        init_waitqueue_head(&linux_bit_waitq);
        init_waitqueue_head(&linux_var_waitq);
 
@@ -3000,7 +2890,6 @@ linux_compat_uninit(void *arg)
        free(__cpu_data, M_KMALLOC);
 #endif
 
-       mtx_destroy(&vmmaplock);
        spin_lock_destroy(&pci_lock);
        rw_destroy(&linux_vma_lock);
 }
diff --git a/sys/compat/linuxkpi/common/src/linux_page.c b/sys/compat/linuxkpi/common/src/linux_page.c
index c0d9d71ba1ce..57ca1401b912 100644
--- a/sys/compat/linuxkpi/common/src/linux_page.c
+++ b/sys/compat/linuxkpi/common/src/linux_page.c
@@ -318,12 +318,119 @@ lkpi_get_user_pages(unsigned long start, unsigned long nr_pages,
            !!(gup_flags & FOLL_WRITE), pages));
 }
 
+/*
+ * Hash of vmmap addresses.  This is infrequently accessed and does not
+ * need to be particularly large.  This is done because we must store the
+ * caller's idea of the map size to properly unmap.
+ */
+struct vmmap {
+       LIST_ENTRY(vmmap)       vm_next;
+       void                    *vm_addr;
+       unsigned long           vm_size;
+};
+
+struct vmmaphd {
+       struct vmmap *lh_first;
+};
+#define VMMAP_HASH_SIZE 64
+#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1)
+#define VM_HASH(addr)   ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
+static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
+static struct mtx vmmaplock;
+
 int
 is_vmalloc_addr(const void *addr)
 {
        return (vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK) != NULL);
 }
 
+static void
+vmmap_add(void *addr, unsigned long size)
+{
+       struct vmmap *vmmap;
+
+       vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
+       mtx_lock(&vmmaplock);
+       vmmap->vm_size = size;
+       vmmap->vm_addr = addr;
+       LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
+       mtx_unlock(&vmmaplock);
+}
+
+static struct vmmap *
+vmmap_remove(void *addr)
+{
+       struct vmmap *vmmap;
+
+       mtx_lock(&vmmaplock);
+       LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
+               if (vmmap->vm_addr == addr)
+                       break;
+       if (vmmap)
+               LIST_REMOVE(vmmap, vm_next);
+       mtx_unlock(&vmmaplock);
+
+       return (vmmap);
+}
+
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
+void *
+_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
+{
+       void *addr;
+
+       addr = pmap_mapdev_attr(phys_addr, size, attr);
+       if (addr == NULL)
+               return (NULL);
+       vmmap_add(addr, size);
+
+       return (addr);
+}
+#endif
+
+void
+iounmap(void *addr)
+{
+       struct vmmap *vmmap;
+
+       vmmap = vmmap_remove(addr);
+       if (vmmap == NULL)
+               return;
+#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
+       pmap_unmapdev(addr, vmmap->vm_size);
+#endif
+       kfree(vmmap);
+}
+
+void *
+vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
+{
+       vm_offset_t off;
+       size_t size;
+
+       size = count * PAGE_SIZE;
+       off = kva_alloc(size);
+       if (off == 0)
+               return (NULL);
+       vmmap_add((void *)off, size);
+       pmap_qenter(off, pages, count);
+
+       return ((void *)off);
+}
+
+void
+vunmap(void *addr)
+{
+       struct vmmap *vmmap;
+
+       vmmap = vmmap_remove(addr);
+       if (vmmap == NULL)
+               return;
+       pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
+       kva_free((vm_offset_t)addr, vmmap->vm_size);
+       kfree(vmmap);
+}
+
 vm_fault_t
 lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
     unsigned long pfn, pgprot_t prot)
@@ -575,3 +682,21 @@ linuxkpi__page_frag_cache_drain(struct page *page, size_t count __unused)
 
        linux_free_pages(page, 0);
 }
+
+static void
+lkpi_page_init(void *arg)
+{
+       int i;
+
+       mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
+       for (i = 0; i < VMMAP_HASH_SIZE; i++)
+               LIST_INIT(&vmmaphead[i]);
+}
+SYSINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_init, NULL);
+
+static void
+lkpi_page_uninit(void *arg)
+{
+       mtx_destroy(&vmmaplock);
+}
+SYSUNINIT(lkpi_page, SI_SUB_DRIVERS, SI_ORDER_SECOND, lkpi_page_uninit, NULL);
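
The same vmmap hash backs the ioremap()/iounmap() pair above, which is
why iounmap() takes no size argument.  Another hypothetical sketch (not
from the commit; the register offset and value are invented, while
ioremap()/iounmap()/writel() are LinuxKPI's linux/io.h interfaces):

/*
 * Hypothetical example, not part of the commit: map a device BAR,
 * poke one made-up register, and unmap.  iounmap() looks the address
 * up in the vmmap hash to recover the mapping size.
 */
static int
ioremap_usage_sketch(vm_paddr_t bar_pa, unsigned long bar_len)
{
        void __iomem *regs;

        regs = ioremap(bar_pa, bar_len);
        if (regs == NULL)
                return (-ENOMEM);
        writel(0x1, regs + 0x10);       /* invented control register */
        iounmap(regs);
        return (0);
}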
