Similar to commit ce6fa91b9363 ("mm/slub.c: add a naive detection
of double free or corruption"), add a very cheap double-free check
for SLAB under CONFIG_SLAB_FREELIST_HARDENED. With this added, the
"SLAB_FREE_DOUBLE" LKDTM test passes under SLAB:

  lkdtm: Performing direct entry SLAB_FREE_DOUBLE
  lkdtm: Attempting double slab free ...
  ------------[ cut here ]------------
  WARNING: CPU: 2 PID: 2193 at mm/slab.c:757 ___cache_free+0x325/0x390

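The check compares the pointer being freed against the most recently
freed entry in the per-CPU array cache, so it only catches back-to-back
double frees of the same object. As an illustrative sketch (kmalloc/kfree
are the real kernel APIs, but the snippet itself is not taken from lkdtm),
this is the pattern that now trips the WARN_ON_ONCE():

  void *obj = kmalloc(64, GFP_KERNEL);

  kfree(obj);
  kfree(obj);  /* second, back-to-back free of the same object is caught */
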
Signed-off-by: Kees Cook <keesc...@chromium.org>
---
 mm/slab.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index ebac5e400ad0..bbff6705ab2b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -749,6 +749,16 @@ static void drain_alien_cache(struct kmem_cache *cachep,
        }
 }
 
+/* &alien->lock must be held by alien callers. */
+static __always_inline void __free_one(struct array_cache *ac, void *objp)
+{
+       /* Avoid trivial double-free. */
+       if (IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
+           WARN_ON_ONCE(ac->avail > 0 && ac->entry[ac->avail - 1] == objp))
+               return;
+       ac->entry[ac->avail++] = objp;
+}
+
 static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
                                int node, int page_node)
 {
@@ -767,7 +777,7 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
                        STATS_INC_ACOVERFLOW(cachep);
                        __drain_alien_cache(cachep, ac, page_node, &list);
                }
-               ac->entry[ac->avail++] = objp;
+               __free_one(ac, objp);
                spin_unlock(&alien->lock);
                slabs_destroy(cachep, &list);
        } else {
@@ -3457,7 +3467,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
                }
        }
 
-       ac->entry[ac->avail++] = objp;
+       __free_one(ac, objp);
 }
 
 /**
-- 
2.25.1