FORCE_READ(*addr) ensures that the compiler will emit a load from addr. Several tests need to trigger such a load for every page in the range [addr, addr + len), ensuring that every page is faulted in if it wasn't already.
Introduce a new helper force_read_pages_in_range() that does exactly that and replace existing loops with a call to it. Some of those loops have a different step size, but reading from every page is appropriate in all cases. Signed-off-by: Kevin Brodsky <[email protected]> --- tools/testing/selftests/mm/hugetlb-madvise.c | 9 +-------- tools/testing/selftests/mm/pfnmap.c | 16 ++++++---------- .../testing/selftests/mm/split_huge_page_test.c | 6 +----- tools/testing/selftests/mm/vm_util.h | 6 ++++++ 4 files changed, 14 insertions(+), 23 deletions(-) diff --git a/tools/testing/selftests/mm/hugetlb-madvise.c b/tools/testing/selftests/mm/hugetlb-madvise.c index 05d9d2805ae4..1f82568ae262 100644 --- a/tools/testing/selftests/mm/hugetlb-madvise.c +++ b/tools/testing/selftests/mm/hugetlb-madvise.c @@ -47,14 +47,7 @@ void write_fault_pages(void *addr, unsigned long nr_pages) void read_fault_pages(void *addr, unsigned long nr_pages) { - unsigned long i; - - for (i = 0; i < nr_pages; i++) { - unsigned long *addr2 = - ((unsigned long *)(addr + (i * huge_page_size))); - /* Prevent the compiler from optimizing out the entire loop: */ - FORCE_READ(*addr2); - } + force_read_pages_in_range(addr, nr_pages * huge_page_size); } int main(int argc, char **argv) diff --git a/tools/testing/selftests/mm/pfnmap.c b/tools/testing/selftests/mm/pfnmap.c index f546dfb10cae..35b0e3ed54cd 100644 --- a/tools/testing/selftests/mm/pfnmap.c +++ b/tools/testing/selftests/mm/pfnmap.c @@ -33,20 +33,17 @@ static void signal_handler(int sig) siglongjmp(sigjmp_buf_env, -EFAULT); } -static int test_read_access(char *addr, size_t size, size_t pagesize) +static int test_read_access(char *addr, size_t size) { - size_t offs; int ret; if (signal(SIGSEGV, signal_handler) == SIG_ERR) return -EINVAL; ret = sigsetjmp(sigjmp_buf_env, 1); - if (!ret) { - for (offs = 0; offs < size; offs += pagesize) - /* Force a read that the compiler cannot optimize out. 
*/ - *((volatile char *)(addr + offs)); - } + if (!ret) + force_read_pages_in_range(addr, size); + if (signal(SIGSEGV, SIG_DFL) == SIG_ERR) return -EINVAL; @@ -138,7 +135,7 @@ FIXTURE_SETUP(pfnmap) SKIP(return, "Invalid file: '%s'. Not pfnmap'ed\n", file); /* ... and want to be able to read from them. */ - if (test_read_access(self->addr1, self->size1, self->pagesize)) + if (test_read_access(self->addr1, self->size1)) SKIP(return, "Cannot read-access mmap'ed '%s'\n", file); self->size2 = 0; @@ -243,8 +240,7 @@ TEST_F(pfnmap, fork) ASSERT_GE(pid, 0); if (!pid) { - EXPECT_EQ(test_read_access(self->addr1, self->size1, - self->pagesize), 0); + EXPECT_EQ(test_read_access(self->addr1, self->size1), 0); exit(0); } diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 40799f3f0213..65a89ceca4a5 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -652,11 +652,7 @@ static int create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, } madvise(*addr, fd_size, MADV_HUGEPAGE); - for (size_t i = 0; i < fd_size; i++) { - char *addr2 = *addr + i; - - FORCE_READ(*addr2); - } + force_read_pages_in_range(*addr, fd_size); if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) { ksft_print_msg("No large pagecache folio generated, please provide a filesystem supporting large folio\n"); diff --git a/tools/testing/selftests/mm/vm_util.h b/tools/testing/selftests/mm/vm_util.h index 6ad32b1830f1..74bdf96161d7 100644 --- a/tools/testing/selftests/mm/vm_util.h +++ b/tools/testing/selftests/mm/vm_util.h @@ -54,6 +54,12 @@ static inline unsigned int pshift(void) return __page_shift; } +static inline void force_read_pages_in_range(char *addr, size_t len) +{ + for (size_t i = 0; i < len; i += psize()) + FORCE_READ(addr[i]); +} + bool detect_huge_zeropage(void); /* -- 2.51.2

