The tests validate the memory.cache cgroup file: they check that the amount
of page cache used by a cgroup can be limited and is accounted correctly.

The page cache is populated by prefetching contents of a 1 GiB file. The
two cgroups used for testing limit cache usage at 512 MiB and 256 MiB
respectively.

The test to migrate a process from one cgroup to another is currently
marked as expected to fail as the functionality is not correctly
implemented in cgroups yet
(https://virtuozzo.atlassian.net/browse/VSTOR-119404).

The current error tolerance is set to 15%.

https://virtuozzo.atlassian.net/browse/VSTOR-112174

Feature: mm: Memory cgroup page cache limit

Signed-off-by: Dmitry Sepp <[email protected]>
---
 tools/testing/selftests/cgroup/Makefile     |   2 +
 tools/testing/selftests/cgroup/config       |   1 +
 tools/testing/selftests/cgroup/test_cache.c | 273 ++++++++++++++++++++
 3 files changed, 276 insertions(+)
 create mode 100644 tools/testing/selftests/cgroup/test_cache.c

diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 1b897152bab6..ff290321d7da 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS += test_kmem
 TEST_GEN_PROGS += test_memcontrol
 TEST_GEN_PROGS += test_pids
 TEST_GEN_PROGS += test_zswap
+TEST_GEN_PROGS += test_cache
 
 LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
 
@@ -32,3 +33,4 @@ $(OUTPUT)/test_kmem: cgroup_util.c
 $(OUTPUT)/test_memcontrol: cgroup_util.c
 $(OUTPUT)/test_pids: cgroup_util.c
 $(OUTPUT)/test_zswap: cgroup_util.c
+$(OUTPUT)/test_cache: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/config b/tools/testing/selftests/cgroup/config
index 39f979690dd3..641ed9bc26b4 100644
--- a/tools/testing/selftests/cgroup/config
+++ b/tools/testing/selftests/cgroup/config
@@ -4,3 +4,4 @@ CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_MEMCG=y
 CONFIG_PAGE_COUNTER=y
+CONFIG_CACHESTAT_SYSCALL=y
diff --git a/tools/testing/selftests/cgroup/test_cache.c b/tools/testing/selftests/cgroup/test_cache.c
new file mode 100644
index 000000000000..865c500c8e21
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_cache.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  Copyright (C) 2025 Virtuozzo International GmbH. All rights reserved.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#include <linux/prctl.h>
+#include <linux/mman.h>
+#include <linux/limits.h>
+
+#include "../kselftest_harness.h"
+#include "cgroup_util.h"
+
+static char root[PATH_MAX];
+static const size_t file_size = MB(1024);
+static char *cg_test0_cache_max_str = "512M";
+static char *cg_test1_cache_max_str = "256M";
+
+static int vm_drop_caches(void)
+{
+       int fd;
+       int ret = EXIT_SUCCESS;
+
+       fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
+       if (fd < 0) {
+               ksft_perror("failed to open drop_caches");
+               return EXIT_FAILURE;
+       }
+
+       if (write(fd, "3", 1) < 0) {
+               ksft_perror("failed to write to drop_caches");
+               ret = EXIT_FAILURE;
+       }
+
+       if (close(fd) < 0) {
+               ksft_perror("failed to close drop_caches");
+               ret = EXIT_FAILURE;
+       }
+
+       return ret;
+}
+
+struct test_context {
+       struct _test_data_cache_control *self;
+       struct __test_metadata *metadata;
+};
+
+FIXTURE(cache_control) {
+       int fd;
+       char *cg_test0;
+       char *cg_test1;
+       char *cg_curr;
+       int pipefd[2];
+       struct test_context ctx;
+};
+
+FIXTURE_VARIANT(cache_control) {
+};
+
+/* Define a mock variant as XFAIL_ADD() only accepts variants */
+FIXTURE_VARIANT_ADD(cache_control, default) {
+};
+
+FIXTURE_SETUP(cache_control)
+{
+       self->fd = get_temp_fd();
+       EXPECT_NE(-1, self->fd);
+       EXPECT_NE(-1, ftruncate(self->fd, file_size));
+
+       self->cg_test0 = cg_name(root, "kselftest0");
+       EXPECT_NE(-1, cg_create(self->cg_test0));
+       self->cg_test1 = cg_name(root, "kselftest1");
+       EXPECT_NE(-1, cg_create(self->cg_test1));
+
+       EXPECT_NE(-1, pipe(self->pipefd));
+
+       /* Store the context to make it accessible outside of the harness */
+       self->ctx.self = self;
+       self->ctx.metadata = _metadata;
+
+       EXPECT_EQ(EXIT_SUCCESS, vm_drop_caches());
+};
+
+FIXTURE_TEARDOWN(cache_control)
+{
+       EXPECT_NE(-1, close(self->fd));
+       EXPECT_NE(-1, cg_destroy(self->cg_test1));
+       free(self->cg_test1);
+       EXPECT_NE(-1, cg_destroy(self->cg_test0));
+       free(self->cg_test0);
+}
+
+static int access_file(const char *cgroup, void *arg)
+{
+       void *mmap_ptr;
+       char tmp = 's';
+       struct test_context *ctx = arg;
+       struct _test_data_cache_control *self = ctx->self;
+       struct __test_metadata *_metadata = ctx->metadata;
+
+       EXPECT_NE(-1, close(self->pipefd[0]));
+       /* Simplify cleanup */
+       EXPECT_NE(-1, prctl(PR_SET_PDEATHSIG, SIGTERM));
+
+       mmap_ptr = mmap(NULL, file_size, PROT_READ | PROT_WRITE,
+                             MAP_SHARED | MAP_POPULATE, self->fd, 0);
+       EXPECT_NE(MAP_FAILED, mmap_ptr);
+
+       EXPECT_NE(-1, write(self->pipefd[1], &tmp, 1));
+       EXPECT_NE(-1, close(self->pipefd[1]));
+
+       pause();
+
+       return EXIT_SUCCESS;
+}
+
+static int test_process_start(struct test_context *ctx)
+{
+       int child_pid;
+       char tmp;
+       struct _test_data_cache_control *self = ctx->self;
+       struct __test_metadata *_metadata = ctx->metadata;
+
+       child_pid = cg_run_nowait(self->cg_test0, access_file,
+                                 (void *)&self->ctx);
+       EXPECT_NE(-1, child_pid);
+       EXPECT_NE(-1, close(self->pipefd[1]));
+       self->cg_curr = self->cg_test0;
+
+       /* Wait for the child to enter cgroup */
+       EXPECT_NE(-1, cg_wait_for_proc_count(self->cg_test0, 1));
+
+       /* Wait for the child to populate the page cache */
+       EXPECT_NE(-1, read(self->pipefd[0], &tmp, 1));
+       EXPECT_NE(-1, close(self->pipefd[0]));
+
+       return child_pid;
+}
+
+static void test_process_stop(int child_pid, struct test_context *ctx)
+{
+       char buf[PAGE_SIZE];
+       struct _test_data_cache_control *self = ctx->self;
+       struct __test_metadata *_metadata = ctx->metadata;
+
+       EXPECT_NE(-1, cg_killall(self->cg_curr));
+
+       /* Wait for cgroup to be empty */
+       while (1) {
+               EXPECT_NE(-1, cg_read(self->cg_curr, "cgroup.procs", buf,
+                                     sizeof(buf)));
+               if (buf[0] == '\0')
+                       break;
+               usleep(1000);
+       }
+
+       EXPECT_NE(-1, waitpid(child_pid, NULL, 0));
+}
+
+static void verify_cache_usage(struct test_context *ctx)
+{
+       long cache_current;
+       long cache_max;
+       struct _test_data_cache_control *self = ctx->self;
+       struct __test_metadata *_metadata = ctx->metadata;
+
+       cache_current = cg_read_long(self->cg_curr, "memory.cache.current");
+       EXPECT_NE(-1, cache_current);
+
+       cache_max = cg_read_long(self->cg_curr, "memory.cache.max");
+       EXPECT_NE(-1, cache_max);
+
+       EXPECT_EQ(true, values_close(cache_current, cache_max, 15)) {
+               ksft_print_msg("Incorrect cache usage: current=%li max=%li\n",
+                       cache_current, cache_max);
+       }
+}
+
+static void verify_cache_accounting(struct test_context *ctx)
+{
+       long cache_current;
+       struct cachestat cs;
+       struct cachestat_range cs_range = { 0, file_size };
+       struct _test_data_cache_control *self = ctx->self;
+       struct __test_metadata *_metadata = ctx->metadata;
+
+       cache_current = cg_read_long(self->cg_curr, "memory.cache.current");
+       EXPECT_NE(-1, cache_current);
+
+       EXPECT_NE(-1, syscall(__NR_cachestat, self->fd, &cs_range, &cs, 0));
+       EXPECT_EQ(true, values_close(cache_current, cs.nr_cache * PAGE_SIZE, 
15)) {
+               ksft_print_msg("Incorrect cache accounting: cg=%li 
cachestat=%llu\n",
+               cache_current, cs.nr_cache * PAGE_SIZE);
+       }
+}
+
+TEST_F(cache_control, limit_before_access)
+{
+       int child_pid;
+
+       cg_write(self->cg_test0, "memory.cache.max", cg_test0_cache_max_str);
+
+       child_pid = test_process_start(&self->ctx);
+
+       verify_cache_usage(&self->ctx);
+       verify_cache_accounting(&self->ctx);
+
+       test_process_stop(child_pid, &self->ctx);
+}
+
+TEST_F(cache_control, limit_after_access)
+{
+       int child_pid;
+
+       child_pid = test_process_start(&self->ctx);
+
+       verify_cache_accounting(&self->ctx);
+
+       cg_write(self->cg_test0, "memory.cache.max", cg_test0_cache_max_str);
+       sleep(1);
+
+       verify_cache_usage(&self->ctx);
+       verify_cache_accounting(&self->ctx);
+
+       test_process_stop(child_pid, &self->ctx);
+}
+
+TEST_F(cache_control, limit_after_migration)
+{
+       int child_pid;
+
+       cg_write(self->cg_test0, "memory.cache.max", cg_test0_cache_max_str);
+       cg_write(self->cg_test1, "memory.cache.max", cg_test1_cache_max_str);
+
+       child_pid = test_process_start(&self->ctx);
+
+       EXPECT_NE(-1, cg_enter(self->cg_test1, child_pid));
+       self->cg_curr = self->cg_test1;
+
+       /* Wait for the child to enter cgroup */
+       EXPECT_NE(-1, cg_wait_for_proc_count(self->cg_test1, 1));
+       sleep(1);
+
+       verify_cache_usage(&self->ctx);
+       verify_cache_accounting(&self->ctx);
+
+       test_process_stop(child_pid, &self->ctx);
+}
+
+XFAIL_ADD(cache_control, default, limit_after_migration);
+
+int main(int argc, char *argv[])
+{
+       int ret;
+
+       if (geteuid())
+               ksft_exit_skip("needs root to run\n");
+
+       if (cg_find_unified_root(root, sizeof(root), NULL))
+               ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+       ret = test_harness_run(argc, argv);
+
+       ksft_exit(ret);
+}
-- 
2.51.0

_______________________________________________
Devel mailing list
[email protected]
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to