On 02.07.25 16:20, Muhammad Usama Anjum wrote:
Add test cases to verify the correctness of the PFN ZERO flag of the
pagemap_scan ioctl. Test with memory backed by normal pages and with
memory backed by huge pages.

Cc: David Hildenbrand <[email protected]>
Signed-off-by: Muhammad Usama Anjum <[email protected]>
---
The bug has been fixed [1].

[1] https://lore.kernel.org/all/[email protected]
Changes since v1:
- Skip if madvise() fails
- Skip test if use_zero_page isn't set to 1
- Keep on using memalign()+free() to allocate huge pages
---
  tools/testing/selftests/mm/pagemap_ioctl.c | 86 +++++++++++++++++++++-
  1 file changed, 85 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index 57b4bba2b45f3..976ab357f4651 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -1,4 +1,5 @@
  // SPDX-License-Identifier: GPL-2.0
+
  #define _GNU_SOURCE
  #include <stdio.h>
  #include <fcntl.h>
@@ -1480,6 +1481,86 @@ static void transact_test(int page_size)
                              extra_thread_faults);
  }
+bool is_use_zero_page_set(void)
+{
+       ssize_t bytes_read;
+       char buffer[2] = {0};
+       int fd;
+
+       fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page", O_RDONLY);
+       if (fd < 0)
+               return 0;
+
+       bytes_read = read(fd, buffer, sizeof(buffer) - 1);
+       if (bytes_read <= 0) {
+               close(fd);
+               return 0;
+       }
+
+       close(fd);
+       if (atoi(buffer) != 1)
+               return 0;
+
+       return 1;
+}

You should probably factor out detect_huge_zeropage() from cow.c into vm_util.c, and let it return the result.
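
Completely untested sketch of what that could look like (basically cow.c's
detect_huge_zeropage() turned into a bool-returning helper; the name and the
error handling are just a suggestion):

bool detect_huge_zeropage(void)
{
        int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page",
                      O_RDONLY);
        bool enabled = false;
        char buf[15];
        int ret;

        if (fd < 0)
                return false;

        ret = pread(fd, buf, sizeof(buf), 0);
        if (ret > 0 && ret < sizeof(buf)) {
                /* NUL-terminate before parsing. */
                buf[ret] = 0;

                /* "1" means the huge zeropage is enabled. */
                if (strtoul(buf, NULL, 10) == 1)
                        enabled = true;
        }

        close(fd);
        return enabled;
}

vm_util.c would need <fcntl.h>, <stdbool.h>, <stdlib.h> and <unistd.h> for
this, if it doesn't include them already. Your zeropfn_tests() could then
simply do the ksft_test_result_skip() based on the return value.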

+
+void zeropfn_tests(void)
+{
+       unsigned long long mem_size;
+       struct page_region vec;
+       int i, ret;
+       char *mem;
+
+       /* Test with normal memory */
+       mem_size = 10 * page_size;
+       mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
+       if (mem == MAP_FAILED)
+               ksft_exit_fail_msg("error nomem\n");
+
+       /* Touch each page to ensure it's mapped */
+       for (i = 0; i < mem_size; i += page_size)
+               (void)((volatile char *)mem)[i];
+
+       ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
+                           (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
+       if (ret < 0)
+               ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+
+       ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
+                        "%s all pages must have PFNZERO set\n", __func__);
+
+       munmap(mem, mem_size);
+
+       /* Test with huge pages if use_zero_page is set to 1 */
+       if (!is_use_zero_page_set()) {
+               ksft_test_result_skip("%s use_zero_page not supported or not set to 1\n", __func__);
+               return;
+       }
+
+       mem_size = 10 * hpage_size;
+       mem = memalign(hpage_size, mem_size);
+       if (!mem)
+               ksft_exit_fail_msg("error nomem\n");

Didn't we discuss using mmap() instead?

See run_with_huge_zeropage() in cow.c for how to do the alignment yourself easily.
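
Untested sketch, reusing your mem_size/hpage_size and only changing the
allocation and cleanup; everything in between would stay as in your patch:

        char *mmap_mem, *mem;

        /* Over-allocate by one huge page so we can align manually. */
        mmap_mem = mmap(NULL, mem_size + hpage_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON, -1, 0);
        if (mmap_mem == MAP_FAILED)
                ksft_exit_fail_msg("error nomem\n");

        /* We need a huge-page-aligned area, so round up inside the mapping. */
        mem = (char *)(((uintptr_t)mmap_mem + hpage_size) &
                       ~((uintptr_t)hpage_size - 1));

        /* ... MADV_HUGEPAGE, touching the memory and the PAGE_IS_PFNZERO
         * checks on mem go here, as in your patch ... */

        munmap(mmap_mem, mem_size + hpage_size);

That's essentially what run_with_huge_zeropage() does with pmdsize. I used
PROT_READ | PROT_WRITE like cow.c does; PROT_READ might be all you need here.
You might also need <stdint.h> for uintptr_t if pagemap_ioctl.c doesn't pull
it in already.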


--
Cheers,

David / dhildenb

