Kees Cook <[email protected]> writes:
-	/**
-	 * Error message on a free page can be false positive
-	 * if binder shrinker ran during binder_alloc_free_buf
-	 * calls above.
-	 */
 	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
 		if (list_empty(page_to_lru(alloc->pages[i]))) {
 			pr_err_size_seq(sizes, seq);
Ah, I see the comment is removed entirely here. Better to move this hunk to patch 1, then.
This comment is actually "true" right up until this change introduces the test-specific freelist, but I've moved the "for" line back to where it was in the previous patch. Thanks for noting this!
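(For anyone following along: my understanding of the false positive the old comment guarded against, sketched below. This is illustrative only, not part of the patch; the identifiers are the driver's existing ones.)

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	/*
	 * With the shared, global binder_freelist, the shrinker can run
	 * right here and reclaim the just-freed pages through
	 * binder_alloc_free_page(), removing them from the lru...
	 */
	for (i = 0; i <= (end - 1) / PAGE_SIZE; i++) {
		/* ...so this fires even though the free was correct. */
		if (list_empty(page_to_lru(alloc->pages[i])))
			pr_err_size_seq(sizes, seq);
	}

Once the test allocates from its own binder_selftest_freelist, which as far as I can tell the shrinker never walks, that race disappears, so dropping the comment makes sense to me.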
@@ -162,8 +159,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc)
 	int i;
 	unsigned long count;
-	while ((count = list_lru_count(&binder_freelist))) {
-		list_lru_walk(&binder_freelist, binder_alloc_free_page,
+	while ((count = list_lru_count(&binder_selftest_freelist))) {
+		list_lru_walk(&binder_selftest_freelist, binder_alloc_free_page,
 			      NULL, count);
 	}
@@ -187,7 +184,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc,
 	/* Allocate from lru. */
 	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
-	if (list_lru_count(&binder_freelist))
+	if (list_lru_count(&binder_selftest_freelist))
 		pr_err("lru list should be empty but is not\n");
 	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
@@ -275,6 +272,20 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
 	}
 }
+int binder_selftest_alloc_get_page_count(struct binder_alloc *alloc)
+{
+	struct page *page;
+	int allocated = 0;
+	int i;
+
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = alloc->pages[i];
+		if (page)
+			allocated++;
+	}
+	return allocated;
+}
+
 /**
  * binder_selftest_alloc() - Test alloc and free of buffer pages.
  * @alloc: Pointer to alloc struct.
@@ -286,6 +297,7 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
  */
 void binder_selftest_alloc(struct binder_alloc *alloc)
 {
+	struct list_lru *prev_freelist;
 	size_t end_offset[BUFFER_NUM];

 	if (!binder_selftest_run)
@@ -293,14 +305,41 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
 	mutex_lock(&binder_selftest_lock);
 	if (!binder_selftest_run || !alloc->mapped)
 		goto done;
+
+	prev_freelist = alloc->freelist;
+
+	/*
+	 * It is not safe to modify this process's alloc->freelist if it has
+	 * any pages on a freelist. Since the test runs before any binder
+	 * ioctls can be dealt with, none of its pages should be allocated yet.
+	 */
+	if (binder_selftest_alloc_get_page_count(alloc)) {
+		pr_err("process has existing alloc state\n");
+		goto cleanup;
+	}
+
+	if (list_lru_init(&binder_selftest_freelist)) {
+		pr_err("failed to init test freelist\n");
+		goto cleanup;
+	}
+
+	alloc->freelist = &binder_selftest_freelist;
+
 	pr_info("STARTED\n");
 	binder_selftest_alloc_offset(alloc, end_offset, 0);
-	binder_selftest_run = false;
 	if (binder_selftest_failures > 0)
 		pr_info("%d tests FAILED\n", binder_selftest_failures);
 	else
 		pr_info("PASSED\n");
+	if (list_lru_count(&binder_selftest_freelist))
+		pr_err("expect test freelist to be empty\n");
+
+cleanup:
+	/* Even if we didn't run the test, it's no longer thread-safe. */
+	binder_selftest_run = false;
+	alloc->freelist = prev_freelist;
+	list_lru_destroy(&binder_selftest_freelist);
 done:
 	mutex_unlock(&binder_selftest_lock);
 }
-- 
2.50.0.727.gbf7dc18ff4-goog
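To double-check my reading, the resulting control flow of binder_selftest_alloc() would be roughly this (my abbreviated reconstruction, not the literal file contents):

void binder_selftest_alloc(struct binder_alloc *alloc)
{
	struct list_lru *prev_freelist;
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->mapped)
		goto done;

	prev_freelist = alloc->freelist;
	if (binder_selftest_alloc_get_page_count(alloc))
		goto cleanup;		/* pre-existing alloc state */
	if (list_lru_init(&binder_selftest_freelist))
		goto cleanup;		/* freelist init failed */
	alloc->freelist = &binder_selftest_freelist;

	/* ... run the tests and report PASSED/FAILED ... */

cleanup:
	/* One-shot even on the error paths above. */
	binder_selftest_run = false;
	alloc->freelist = prev_freelist;
	list_lru_destroy(&binder_selftest_freelist);
done:
	mutex_unlock(&binder_selftest_lock);
}

One thing I wanted to convince myself of: on the two early-error paths, list_lru_destroy() is called on a freelist that was never (successfully) initialized, but if I'm reading mm/list_lru.c right it returns early when ->node is NULL, so that looks safe.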
Otherwise looks good.
-- 
Tiffany Y. Yang

