[Devel] [PATCH RHEL7 COMMIT] ms/mm, mempool: poison elements backed by slab allocator

Konstantin Khorenko khorenko at virtuozzo.com
Thu Sep 3 08:27:48 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-229.7.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.6.6
------>
commit bbeaa6232872bec76a69e7cb6b41606f1cf61ad3
Author: Andrey Ryabinin <aryabinin at odin.com>
Date:   Thu Sep 3 19:27:48 2015 +0400

    ms/mm, mempool: poison elements backed by slab allocator
    
    https://jira.sw.ru/browse/PSBM-26429
    
    From: David Rientjes <rientjes at google.com>
    
    commit bdfedb76f4f5aa5e37380e3b71adee4a39f30fc6 upstream.
    
    Mempools keep elements in a reserved pool for contexts in which allocation
    may not be possible.  When an element is allocated from the reserved pool,
    its memory contents are the same as when it was added to the reserved
    pool.
    
    Because of this, elements lack any free poisoning to detect use-after-free
    errors.
    
    This patch adds free poisoning for elements backed by the slab allocator.
    This is possible because the mempool layer knows the object size of each
    element.
    
    When an element is added to the reserved pool, it is poisoned with
    POISON_FREE.  When it is removed from the reserved pool, the contents are
    checked for POISON_FREE.  If there is a mismatch, a warning is emitted to
    the kernel log.
    
    This is only effective for configs with CONFIG_DEBUG_SLAB or
    CONFIG_SLUB_DEBUG_ON.
    
    [fabio.estevam at freescale.com: use '%zu' for printing 'size_t' variable]
    [arnd at arndb.de: add missing include]
    Signed-off-by: David Rientjes <rientjes at google.com>
    Cc: Dave Kleikamp <shaggy at kernel.org>
    Cc: Christoph Hellwig <hch at lst.de>
    Cc: Sebastian Ott <sebott at linux.vnet.ibm.com>
    Cc: Mikulas Patocka <mpatocka at redhat.com>
    Cc: Catalin Marinas <catalin.marinas at arm.com>
    Signed-off-by: Fabio Estevam <fabio.estevam at freescale.com>
    Signed-off-by: Arnd Bergmann <arnd at arndb.de>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    
    Signed-off-by: Andrey Ryabinin <aryabinin at odin.com>
---
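
Note: the new poison/check paths cover any pool whose callbacks are the
slab- or kmalloc-backed helpers, e.g. pools created with
mempool_create_slab_pool() or mempool_create_kmalloc_pool(), since those
set pool->alloc/pool->free to the functions the code below compares
against. A minimal sketch of such a user (struct io_unit and the names
here are made up for illustration; the mempool calls are the stock API):

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/slab.h>

struct io_unit {
	int tag;
	char payload[60];
};

static struct kmem_cache *io_cachep;
static mempool_t *io_pool;

static int __init io_pool_init(void)
{
	io_cachep = KMEM_CACHE(io_unit, 0);
	if (!io_cachep)
		return -ENOMEM;
	/* Reserve 4 elements; pool->alloc == mempool_alloc_slab and
	 * pool->free == mempool_free_slab, so every element parked in
	 * the reserve is poisoned and checked by the code below. */
	io_pool = mempool_create_slab_pool(4, io_cachep);
	if (!io_pool) {
		kmem_cache_destroy(io_cachep);
		return -ENOMEM;
	}
	return 0;
}
module_init(io_pool_init);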
 mm/mempool.c | 94 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 92 insertions(+), 2 deletions(-)

diff --git a/mm/mempool.c b/mm/mempool.c
index 5499047..db146ad 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -6,25 +6,115 @@
  *  extreme VM load.
  *
  *  started by Ingo Molnar, Copyright (C) 2001
+ *  debugging by David Rientjes, Copyright (C) 2015
  */
 
 #include <linux/mm.h>
 #include <linux/slab.h>
+
+#include <linux/highmem.h>
+#include <linux/kmemleak.h>
 #include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
 
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+static void poison_error(mempool_t *pool, void *element, size_t size,
+			 size_t byte)
+{
+	const int nr = pool->curr_nr;
+	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
+	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
+	int i;
+
+	pr_err("BUG: mempool element poison mismatch\n");
+	pr_err("Mempool %p size %zu\n", pool, size);
+	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
+	for (i = start; i < end; i++)
+		pr_cont("%x ", *(u8 *)(element + i));
+	pr_cont("%s\n", end < size ? "..." : "");
+	dump_stack();
+}
+
+static void __check_element(mempool_t *pool, void *element, size_t size)
+{
+	u8 *obj = element;
+	size_t i;
+
+	for (i = 0; i < size; i++) {
+		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
+
+		if (obj[i] != exp) {
+			poison_error(pool, element, size, i);
+			return;
+		}
+	}
+	memset(obj, POISON_INUSE, size);
+}
+
+static void check_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+		__check_element(pool, element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->free == mempool_free_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+
+static void __poison_element(void *element, size_t size)
+{
+	u8 *obj = element;
+
+	memset(obj, POISON_FREE, size - 1);
+	obj[size - 1] = POISON_END;
+}
+
+static void poison_element(mempool_t *pool, void *element)
+{
+	/* Mempools backed by slab allocator */
+	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+		__poison_element(element, ksize(element));
+
+	/* Mempools backed by page allocator */
+	if (pool->alloc == mempool_alloc_pages) {
+		int order = (int)(long)pool->pool_data;
+		void *addr = kmap_atomic((struct page *)element);
+
+		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
+		kunmap_atomic(addr);
+	}
+}
+#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static inline void check_element(mempool_t *pool, void *element)
+{
+}
+static inline void poison_element(mempool_t *pool, void *element)
+{
+}
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+
 static void add_element(mempool_t *pool, void *element)
 {
 	BUG_ON(pool->curr_nr >= pool->min_nr);
+	poison_element(pool, element);
 	pool->elements[pool->curr_nr++] = element;
 }
 
 static void *remove_element(mempool_t *pool)
 {
-	BUG_ON(pool->curr_nr <= 0);
-	return pool->elements[--pool->curr_nr];
+	void *element = pool->elements[--pool->curr_nr];
+
+	BUG_ON(pool->curr_nr < 0);
+	check_element(pool, element);
+	return element;
 }
 
 /**

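While an element sits in the reserve, every byte of it holds POISON_FREE
(0x6b) except the last, which holds POISON_END (0xa5); check_element()
verifies that pattern on the way out and then repaints the object with
POISON_INUSE (0x5b). None of this is built unless the config sets
CONFIG_DEBUG_SLAB (SLAB) or CONFIG_SLUB_DEBUG_ON (SLUB). A sketch of the
use-after-free this is meant to catch, reusing the hypothetical io_pool
from the note above:

static void io_uaf_example(void)
{
	struct io_unit *obj = mempool_alloc(io_pool, GFP_NOIO);

	/* If the reserve is not full, the element goes back into it
	 * and poison_element() fills it with POISON_FREE/POISON_END. */
	mempool_free(obj, io_pool);

	/* Use-after-free: clobbers the poison pattern in the reserve. */
	obj->tag = 1;

	/* An allocation that ends up served from the reserve runs
	 * check_element(), which finds the mismatch at byte 0 and logs
	 * "BUG: mempool element poison mismatch" with a hex dump. */
	obj = mempool_alloc(io_pool, GFP_NOIO);
	mempool_free(obj, io_pool);
}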

