On Thu, 2020-12-03 at 03:16 +0100, Mike Galbraith wrote:
> On Wed, 2020-12-02 at 23:08 +0100, Sebastian Andrzej Siewior wrote:
> Looks like...
>
> d8f117abb380 z3fold: fix use-after-free when freeing handles
>
> ...wasn't completely effective...

The top two hunks below seem to have rendered z3fold RT tolerant.
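
Presumably what bites with the read lock is that the is_free scan and
the HANDLES_ORPHANED update run in a shared section, so another context
can hold the read lock on the same slots array while we decide to free
it.  A sketch of the interleaving that would allow (my reading, it
isn't spelled out anywhere):

	CPU0: __release_z3fold_page()           CPU1: other slots user
	read_lock(&slots->lock);                read_lock(&slots->lock);
	scan slot[]: all NULL -> is_free        ...dereferencing slots...
	read_unlock(&slots->lock);
	kmem_cache_free(pool->c_handle, slots);
	                                        ...uses freed slots -> UAF
	                                        read_unlock(&slots->lock);

Taking the lock for write excludes every other slots user for the
duration of the scan, and with all slots NULL no new user can show up,
so freeing the array afterwards is safe.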

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 18feaa0bc537..851d9f4f1644 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -537,7 +537,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
        spin_unlock(&pool->lock);

        /* If there are no foreign handles, free the handles array */
-       read_lock(&zhdr->slots->lock);
+       write_lock(&zhdr->slots->lock);
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (zhdr->slots->slot[i]) {
                        is_free = false;
@@ -546,7 +546,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
        }
        if (!is_free)
                set_bit(HANDLES_ORPHANED, &zhdr->slots->pool);
-       read_unlock(&zhdr->slots->lock);
+       write_unlock(&zhdr->slots->lock);

        if (is_free)
                kmem_cache_free(pool->c_handle, zhdr->slots);
@@ -642,14 +642,16 @@ static inline void add_to_unbuddied(struct z3fold_pool *pool,
 {
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
-               struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
-
+               struct list_head *unbuddied;
                int freechunks = num_free_chunks(zhdr);
+
+               migrate_disable();
+               unbuddied = this_cpu_ptr(pool->unbuddied);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
-               put_cpu_ptr(pool->unbuddied);
+               migrate_enable();
        }
 }

@@ -886,8 +888,9 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
        int chunks = size_to_chunks(size), i;

 lookup:
+       migrate_disable();
        /* First, try to find an unbuddied z3fold page. */
-       unbuddied = get_cpu_ptr(pool->unbuddied);
+       unbuddied = this_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

@@ -905,7 +908,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
-                       put_cpu_ptr(pool->unbuddied);
+                       migrate_enable();
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
@@ -919,7 +922,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                    test_bit(PAGE_CLAIMED, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
-                       put_cpu_ptr(pool->unbuddied);
+                       migrate_enable();
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
@@ -934,7 +937,7 @@ static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                kref_get(&zhdr->refcount);
                break;
        }
-       put_cpu_ptr(pool->unbuddied);
+       migrate_enable();

        if (!zhdr) {
                int cpu;
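
The remaining hunks are all the same transformation.  FWIW, the way I
read it: get_cpu_ptr() implies preempt_disable(), and on PREEMPT_RT
spin_lock() is a sleeping lock, so the old code could end up sleeping
with preemption disabled.  A minimal sketch of the before/after
pattern (names as in the patch, bodies elided):

	/* Not RT tolerant: get_cpu_ptr() disables preemption, but on
	 * PREEMPT_RT spin_lock() can sleep -> sleeping while atomic.
	 */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	spin_lock(&pool->lock);
	/* ... touch the per-CPU unbuddied lists ... */
	spin_unlock(&pool->lock);
	put_cpu_ptr(pool->unbuddied);

	/* RT tolerant: migrate_disable() only pins the task to this CPU
	 * and leaves it preemptible, so this_cpu_ptr() stays stable and
	 * taking the sleeping lock is legal.
	 */
	migrate_disable();
	unbuddied = this_cpu_ptr(pool->unbuddied);
	spin_lock(&pool->lock);
	/* ... touch the per-CPU unbuddied lists ... */
	spin_unlock(&pool->lock);
	migrate_enable();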
