On Mon, Feb 24, 2025 at 02:47:14PM +0000, Brendan Jackman wrote:
> This is the bare minimum to illustrate what KUnit code would look like
> that covers the page allocator.
> 
> Even this trivial test illustrates a couple of nice things that are
> possible when testing via KUnit:
> 
> 1. We can directly assert that the correct zone was used.
>    (Although note that, due to the simplistic setup, you can have any
>    zone you like as long as it's ZONE_NORMAL.)
> 
> 2. We can assert that a page got freed. It's probably pretty unlikely
>    that we'd have a bug that actually causes a page to get leaked by the
>    allocator, but it serves as a good example of the kind of assertions
>    we can make by judiciously peeking at allocator internals.
> 
> Signed-off-by: Brendan Jackman <jackm...@google.com>
> ---
>  mm/page_alloc_test.c | 139 ++++++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 138 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/page_alloc_test.c b/mm/page_alloc_test.c
> index c6bcfcaf61b57ca35ad1b5fc48fd07d0402843bc..0c4effb151f4cd31ec6a696615a9b6ae4964b332 100644
> --- a/mm/page_alloc_test.c
> +++ b/mm/page_alloc_test.c
> @@ -26,6 +26,139 @@
>       }                                                                       \
>  })
>  
> +#define EXPECT_WITHIN_ZONE(test, page, zone) ({                                      \

ultranit: the '\' above is unaligned with the ones below.

> +     unsigned long pfn = page_to_pfn(page);                                  \
> +     unsigned long start_pfn = zone->zone_start_pfn;                         \
> +     unsigned long end_pfn = start_pfn + zone->spanned_pages;                \
> +                                                                             \
> +     KUNIT_EXPECT_TRUE_MSG(test,                                             \
> +             pfn >= start_pfn && pfn < end_pfn,                              \
> +             "Wanted PFN 0x%lx - 0x%lx, got 0x%lx",                          \
> +             start_pfn, end_pfn, pfn);                                       \
> +     KUNIT_EXPECT_PTR_EQ_MSG(test, page_zone(page), zone,                    \
> +             "Wanted %px (%s), got %px (%s)",                                \
> +             zone, zone->name, page_zone(page), page_zone(page)->name);      \
> +})
> +
> +static void action_nodemask_free(void *ctx)
> +{
> +     NODEMASK_FREE(ctx);
> +}
> +
> +/*
> + * Call __alloc_pages_noprof with a nodemask containing only the nid.
> + *
> + * Never returns NULL.
> + */
> +static inline struct page *alloc_pages_force_nid(struct kunit *test,
> +                                              gfp_t gfp, int order, int nid)
> +{
> +     NODEMASK_ALLOC(nodemask_t, nodemask, GFP_KERNEL);

For the sake of the test, can't we just put the nodemask on the stack?
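
Untested sketch of what I mean, assuming a nodemask_t is small enough
to live on the stack on the configs this test runs on. It would also
make the cleanup action below unnecessary:

	static inline struct page *alloc_pages_force_nid(struct kunit *test,
							 gfp_t gfp, int order, int nid)
	{
		nodemask_t nodemask;	/* on the stack, nothing to free */
		struct page *page;

		nodes_clear(nodemask);
		node_set(nid, nodemask);
		page = __alloc_pages_noprof(gfp, order, nid, &nodemask);
		KUNIT_ASSERT_NOT_NULL(test, page);
		return page;
	}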

> +     struct page *page;
> +
> +     KUNIT_ASSERT_NOT_NULL(test, nodemask);
> +     kunit_add_action(test, action_nodemask_free, nodemask);

Why aren't we just freeing the nodemask after using it, before we make
any assertions?
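
i.e. if we keep NODEMASK_ALLOC, something like this (untested) would
let us drop the deferred action entirely:

	page = __alloc_pages_noprof(gfp, order, nid, nodemask);
	NODEMASK_FREE(nodemask);
	KUNIT_ASSERT_NOT_NULL(test, page);
	return page;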

> +     nodes_clear(*nodemask);
> +     node_set(nid, *nodemask);
> +
> +     page = __alloc_pages_noprof(gfp, order, nid, nodemask);
> +     KUNIT_ASSERT_NOT_NULL(test, page);
> +     return page;
> +}
> +
> +static inline bool page_on_buddy_list(struct page *want_page, struct list_head *head)
> +{
> +     struct page *found_page;

nit: This is just an iterator; we can probably just call it 'page'.
'found' is confusing for me because we didn't really search for it.

> +
> +     list_for_each_entry(found_page, head, buddy_list) {
> +             if (found_page == want_page)
> +                     return true;
> +     }
> +
> +     return false;
> +}
> +
> +/* Test case parameters that are independent of alloc order.  */
> +static const struct {
> +     gfp_t gfp_flags;
> +     enum zone_type want_zone;
> +} alloc_fresh_gfps[] = {
> +     /*
> +      * The way we currently set up the isolated node, everything ends up in
> +      * ZONE_NORMAL.
> +      */
> +     { .gfp_flags = GFP_KERNEL,      .want_zone = ZONE_NORMAL },
> +     { .gfp_flags = GFP_ATOMIC,      .want_zone = ZONE_NORMAL },
> +     { .gfp_flags = GFP_USER,        .want_zone = ZONE_NORMAL },
> +     { .gfp_flags = GFP_DMA32,       .want_zone = ZONE_NORMAL },
> +};
> +
> +struct alloc_fresh_test_case {
> +     int order;
> +     int gfp_idx;
> +};
> +
> +/* Generate test cases as the cross product of orders and alloc_fresh_gfps.  */
> +static const void *alloc_fresh_gen_params(const void *prev, char *desc)
> +{
> +     /* Buffer to avoid allocations. */
> +     static struct alloc_fresh_test_case tc;
> +
> +     if (!prev) {
> +             /* First call */
> +             tc.order = 0;
> +             tc.gfp_idx = 0;
> +             return &tc;
> +     }

We need to set 'tc' here to whatever 'prev' is pointing at, right?
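
i.e. something like this (untested) before the increment, so the
generator doesn't silently rely on 'prev' always pointing at our own
static buffer:

	tc = *(const struct alloc_fresh_test_case *)prev;
	tc.gfp_idx++;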

> +
> +     tc.gfp_idx++;
> +     if (tc.gfp_idx >= ARRAY_SIZE(alloc_fresh_gfps)) {
> +             tc.gfp_idx = 0;
> +             tc.order++;
> +     }
> +     if (tc.order > MAX_PAGE_ORDER)
> +             /* Finished. */
> +             return NULL;
> +
> +     snprintf(desc, KUNIT_PARAM_DESC_SIZE, "order %d %pGg\n",
> +             tc.order, &alloc_fresh_gfps[tc.gfp_idx].gfp_flags);
> +     return &tc;
> +}
> +
> +/* Smoke test: allocate from a node where everything is in a pristine state. */
> +static void test_alloc_fresh(struct kunit *test)
> +{
> +     const struct alloc_fresh_test_case *tc = test->param_value;
> +     gfp_t gfp_flags = alloc_fresh_gfps[tc->gfp_idx].gfp_flags;
> +     enum zone_type want_zone_type = alloc_fresh_gfps[tc->gfp_idx].want_zone;
> +     struct zone *want_zone = &NODE_DATA(isolated_node)->node_zones[want_zone_type];
> +     struct list_head *buddy_list;
> +     struct per_cpu_pages *pcp;
> +     struct page *page, *merged_page;
> +     int cpu;
> +
> +     page = alloc_pages_force_nid(test, gfp_flags, tc->order, isolated_node);
> +
> +     EXPECT_WITHIN_ZONE(test, page, want_zone);
> +
> +     cpu = get_cpu();
> +     __free_pages(page, tc->order);
> +     pcp = per_cpu_ptr(want_zone->per_cpu_pageset, cpu);
> +     put_cpu();
> +
> +     /*
> +      * Should end up back in the free area when drained. Because everything
> +      * is free, it should get buddy-merged up to the maximum order.
> +      */
> +     drain_zone_pages(want_zone, pcp);
> +     KUNIT_EXPECT_TRUE(test, PageBuddy(page));
> +     KUNIT_EXPECT_EQ(test, buddy_order(page), MAX_PAGE_ORDER);
> +     KUNIT_EXPECT_TRUE(test, list_empty(&pcp->lists[MIGRATE_UNMOVABLE]));
> +     merged_page = pfn_to_page(round_down(page_to_pfn(page), 1 << MAX_PAGE_ORDER));
> +     buddy_list = &want_zone->free_area[MAX_PAGE_ORDER].free_list[MIGRATE_UNMOVABLE];
> +     KUNIT_EXPECT_TRUE(test, page_on_buddy_list(merged_page, buddy_list));
> +}
> +
>  static void action_drain_pages_all(void *unused)
>  {
>       int cpu;
> @@ -144,7 +277,11 @@ static void depopulate_isolated_node(struct kunit_suite *suite)
>       WARN_ON(add_memory(0, start, size, MMOP_ONLINE));
>       WARN_ON(walk_memory_blocks(start, size, NULL, memory_block_online_cb));
>  }
> -static struct kunit_case test_cases[] = { {} };
> +
> +static struct kunit_case test_cases[] = {
> +     KUNIT_CASE_PARAM(test_alloc_fresh, alloc_fresh_gen_params),
> +     {}
> +};
>  
>  struct kunit_suite page_alloc_test_suite = {
>       .name = "page_alloc",
> 
> -- 
> 2.48.1.601.g30ceb7b040-goog
> 
