On Tue, Feb 17, 2026, Ackerley Tng wrote:
> diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
> index 618c937f3c90f..d16341a4a315d 100644
> --- a/tools/testing/selftests/kvm/guest_memfd_test.c
> +++ b/tools/testing/selftests/kvm/guest_memfd_test.c
> @@ -171,6 +171,77 @@ static void test_numa_allocation(int fd, size_t total_size)
>       kvm_munmap(mem, total_size);
>  }
>  
> +static size_t getpmdsize(void)

This absolutely belongs in library/utility code.

> +{
> +     const char *path = "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size";
> +     static size_t pmd_size = -1;
> +     FILE *fp;
> +
> +     if (pmd_size != -1)
> +             return pmd_size;
> +
> +     fp = fopen(path, "r");
> +     TEST_ASSERT(fp, "Couldn't open %s to read PMD size.", path);

This will likely assert on a kernel without THP support.

> +     TEST_ASSERT_EQ(fscanf(fp, "%lu", &pmd_size), 1);
> +
> +     TEST_ASSERT_EQ(fclose(fp), 0);

Please try to extend tools/testing/selftests/kvm/include/kvm_syscalls.h.
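
I.e. add asserting wrappers for the plain file ops, in the same spirit as the
kvm_munmap()/kvm_close() wrappers this test already uses.  Completely untested
sketch, names purely illustrative:

	static inline FILE *kvm_fopen(const char *path, const char *mode)
	{
		FILE *fp = fopen(path, mode);

		TEST_ASSERT(fp, "fopen(%s, %s) failed", path, mode);
		return fp;
	}

	static inline void kvm_fclose(FILE *fp)
	{
		TEST_ASSERT_EQ(fclose(fp), 0);
	}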

> +
> +     return pmd_size;
> +}
> +
> +static void test_collapse(struct kvm_vm *vm, uint64_t flags)
> +{
> +     const size_t pmd_size = getpmdsize();
> +     char *mem;
> +     off_t i;
> +     int fd;
> +
> +     fd = vm_create_guest_memfd(vm, pmd_size * 2,
> +                                GUEST_MEMFD_FLAG_MMAP |
> +                                GUEST_MEMFD_FLAG_INIT_SHARED);
> +
> +     /*
> +      * Use aligned address so that MADV_COLLAPSE will not be
> +      * filtered out early in the collapsing routine.

Please elaborate; the value below is way more magical than just being aligned.

> +      */
> +#define ALIGNED_ADDRESS ((void *)0x4000000000UL)

Use a "const void *" instead of #define inside a function.  And use one of the
appropriate size macros, e.g.

        const void *ALIGNED_ADDRESS = (void *)(SZ_1G * <some magic value>);

But why hardcode a virtual address in the first place?  If you need a specific
alignment, just allocate enough virtual memory to be able to meet those
alignment requirements.
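
E.g. something like this (untested sketch, reusing fd/pmd_size from the test
above): over-reserve virtual address space and carve a PMD-aligned chunk out
of the reservation, instead of gambling on a hardcoded address being free:

	void *reservation;
	uintptr_t aligned;

	/* Over-allocate so that a PMD-aligned range is guaranteed to exist. */
	reservation = mmap(NULL, pmd_size * 2, PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	TEST_ASSERT(reservation != MAP_FAILED, "Reserving VA space failed");

	/* Round up to the next PMD boundary within the reservation. */
	aligned = ((uintptr_t)reservation + pmd_size - 1) & ~(pmd_size - 1);

	/* MAP_FIXED replaces the PROT_NONE reservation with the gmem mapping. */
	mem = mmap((void *)aligned, pmd_size, PROT_READ | PROT_WRITE,
		   MAP_FIXED | MAP_SHARED, fd, 0);
	TEST_ASSERT_EQ(mem, (void *)aligned);

Any leftover PROT_NONE pieces of the reservation are harmless for the test and
can be cleaned up along with the mapping.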

> +     mem = mmap(ALIGNED_ADDRESS, pmd_size, PROT_READ | PROT_WRITE,
> +                MAP_FIXED | MAP_SHARED, fd, 0);
> +     TEST_ASSERT_EQ(mem, ALIGNED_ADDRESS);
> +
> +     /*
> +      * Use reads to populate page table to avoid setting dirty
> +      * flag on page.
> +      */
> +     for (i = 0; i < pmd_size; i += getpagesize())
> +             READ_ONCE(mem[i]);
> +
> +     /*
> +      * Advising the use of huge pages in guest_memfd should be
> +      * fine...
> +      */
> +     TEST_ASSERT_EQ(madvise(mem, pmd_size, MADV_HUGEPAGE), 0);
> +
> +     /*
> +      * ... but collapsing folios must not be supported to avoid
> +      * mapping beyond shared ranges into host userspace page
> +      * tables.
> +      */
> +     TEST_ASSERT_EQ(madvise(mem, pmd_size, MADV_COLLAPSE), -1);
> +     TEST_ASSERT_EQ(errno, EINVAL);
> +
> +     /*
> +      * Removing from host page tables and re-faulting should be
> +      * fine; should not end up faulting in a collapsed/huge folio.
> +      */
> +     TEST_ASSERT_EQ(madvise(mem, pmd_size, MADV_DONTNEED), 0);
> +     READ_ONCE(mem[0]);
> +
> +     kvm_munmap(mem, pmd_size);
> +     kvm_close(fd);
> +}
> +
>  static void test_fault_sigbus(int fd, size_t accessible_size, size_t map_size)
>  {
>       const char val = 0xaa;
> @@ -370,6 +441,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
>                       gmem_test(mmap_supported, vm, flags);
>                       gmem_test(fault_overflow, vm, flags);
>                       gmem_test(numa_allocation, vm, flags);
> +                     test_collapse(vm, flags);

Why diverge from everything else?  Yeah, the size is different, but that's easy
enough to handle.  And presumably the THP query needs to be able to fail
gracefully, so something like this?

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 618c937f3c90..e942adae1f59 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -350,14 +350,28 @@ static void test_guest_memfd_flags(struct kvm_vm *vm)
        }
 }
 
-#define gmem_test(__test, __vm, __flags)                               \
+#define __gmem_test(__test, __vm, __flags, __size)                     \
 do {                                                                   \
-       int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags);   \
+       int fd = vm_create_guest_memfd(__vm, __size, __flags);          \
                                                                        \
-       test_##__test(fd, page_size * 4);                               \
+       test_##__test(fd, __size);                                      \
        close(fd);                                                      \
 } while (0)
 
+#define gmem_test(__test, __vm, __flags)                               \
+       __gmem_test(__test, __vm, __flags, page_size * 4)
+
+#define gmem_test_huge_pmd(__test, __vm, __flags)                      \
+do {                                                                   \
+       size_t pmd_size = kvm_get_thp_pmd_size();                       \
+                                                                       \
+       if (!pmd_size)                                                  \
+               break;                                                  \
+                                                                       \
+       __gmem_test(__test, __vm, __flags, pmd_size * 2);               \
+} while (0)
+
+
 static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
 {
        test_create_guest_memfd_multiple(vm);
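
And then kvm_get_thp_pmd_size() (or whatever it ends up being called) can live
in common library code and return 0 when THP isn't available, which also takes
care of the assert-without-THP problem above.  Untested sketch, largely the
same body as the helper in the patch minus the fopen() assert:

	size_t kvm_get_thp_pmd_size(void)
	{
		const char *path = "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size";
		size_t pmd_size;
		FILE *fp;

		/* No THP support => return 0 so callers can skip gracefully. */
		fp = fopen(path, "r");
		if (!fp)
			return 0;

		TEST_ASSERT_EQ(fscanf(fp, "%lu", &pmd_size), 1);
		TEST_ASSERT_EQ(fclose(fp), 0);

		return pmd_size;
	}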

