[Devel] [PATCH RHEL7 COMMIT] ms/mm: slab: free kmem_cache_node after destroy sysfs file

Konstantin Khorenko khorenko at virtuozzo.com
Wed Mar 16 02:55:54 PDT 2016


The commit is pushed to "branch-rh7-3.10.0-327.10.1.vz7.12.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.10.1.vz7.12.2
------>
commit fd3b061911dc10876298f8ba3b415db83e08fa03
Author: Dmitry Safonov <dsafonov at virtuozzo.com>
Date:   Wed Mar 16 13:55:53 2016 +0400

    ms/mm: slab: free kmem_cache_node after destroy sysfs file
    
    When slub_debug's alloc_calls_show is enabled, we try to track the
    location and user of each slab object on every online node, so the
    kmem_cache_node structures and cpu_cache/cpu_slab must not be freed
    until the last reference to the sysfs file has been dropped.
    
    This fixes the following panic:
    
       BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
       IP:  list_locations+0x169/0x4e0
       PGD 257304067 PUD 438456067 PMD 0
       Oops: 0000 [#1] SMP
       CPU: 3 PID: 973074 Comm: cat ve: 0 Not tainted 3.10.0-229.7.2.ovz.9.30-00007-japdoll-dirty #2 9.30
       Hardware name: DEPO Computers To Be Filled By O.E.M./H67DE3, BIOS L1.60c 07/14/2011
       task: ffff88042a5dc5b0 ti: ffff88037f8d8000 task.ti: ffff88037f8d8000
       RIP: list_locations+0x169/0x4e0
       Call Trace:
         alloc_calls_show+0x1d/0x30
         slab_attr_show+0x1b/0x30
         sysfs_read_file+0x9a/0x1a0
         vfs_read+0x9c/0x170
         SyS_read+0x58/0xb0
         system_call_fastpath+0x16/0x1b
       Code: 5e 07 12 00 b9 00 04 00 00 3d 00 04 00 00 0f 4f c1 3d 00 04 00 00 89 45 b0 0f 84 c3 00 00 00 48 63 45 b0 49 8b 9c c4 f8 00 00 00 <48> 8b 43 20 48 85 c0 74 b6 48 89 df e8 46 37 44 00 48 8b 53 10
       CR2: 0000000000000020
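    
    In other words, the sysfs reader walks per-node structures that the
    destroy path has already freed.  A condensed sketch of the reader
    side (illustrative only, simplified from list_locations(); the freed
    pointer explains the small-offset NULL dereference above):
    
       /* alloc_calls_show() -> list_locations(), simplified */
       for_each_node_state(node, N_NORMAL_MEMORY) {
               struct kmem_cache_node *n = get_node(s, node);
               unsigned long flags;
               struct page *page;
    
               /* Before this fix, n may already have been freed and
                * s->node[node] cleared by cache destruction, so the
                * read below faults at a small offset from NULL. */
               if (!atomic_long_read(&n->nr_slabs))
                       continue;
    
               spin_lock_irqsave(&n->list_lock, flags);
               list_for_each_entry(page, &n->partial, lru)
                       process_slab(&t, s, page, alloc);
               spin_unlock_irqrestore(&n->list_lock, flags);
       }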
    
    Separate __kmem_cache_release from __kmem_cache_shutdown; the former
    is now called from slab_kmem_cache_release (after the last reference
    to the sysfs file object has been dropped).
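    
    The resulting teardown order (a sketch of the call flow after this
    patch, not verbatim code):
    
       kmem_cache_destroy(s)
          -> __kmem_cache_shutdown(s)    /* empty the cache, may fail    */
          -> sysfs_slab_remove(s)        /* unlink; readers may persist  */
             ...                         /* last sysfs reference dropped */
       slab_kmem_cache_release(s)        /* kobject release path         */
          -> __kmem_cache_release(s)     /* free cpu_slab/cpu_cache and  */
                                         /* kmem_cache_node structures   */
          -> kmem_cache_free(kmem_cache, s)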
    
    Reintroduce locking in free_partial, as a sysfs file might access the
    cache's partial list after shutdown - a partial revert of commit
    69cb8e6b7c29 ("slub: free slabs without holding locks").  free_partial
    runs in process context with interrupts enabled, so a plain
    spin_lock_irq (guarded by BUG_ON(irqs_disabled())) is sufficient.
    Zap __remove_partial and use remove_partial (w/o underscores), as
    free_partial now takes list_lock; this is a partial revert of commit
    1e4dd9461fab ("slub: do not assert not having lock in removing freed
    partial").
    
    Signed-off-by: Dmitry Safonov <dsafonov at virtuozzo.com>
    Suggested-by: Vladimir Davydov <vdavydov at virtuozzo.com>
    Acked-by: Vladimir Davydov <vdavydov at virtuozzo.com>
    Cc: Christoph Lameter <cl at linux.com>
    Cc: Pekka Enberg <penberg at kernel.org>
    Cc: David Rientjes <rientjes at google.com>
    Cc: Joonsoo Kim <iamjoonsoo.kim at lge.com>
    Signed-off-by: Andrew Morton <akpm at linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds at linux-foundation.org>
    
    ms commit: 52b4b950b50740bff507a62907e86710743c22e7
    ("mm: slab: free kmem_cache_node after destroy sysfs file")
    
    [ported from ms, resolved conflicts with commit 18bf854117c6 ("slab: use
    get_node() and kmem_cache_node() functions") and commit bf0dea23a9c0
    ("mm/slab: use percpu allocator for cpu cache")]
    
    https://jira.sw.ru/browse/PSBM-43010
    
    Signed-off-by: Dmitry Safonov <dsafonov at virtuozzo.com>
---
 mm/slab.c        | 12 ++++++------
 mm/slab.h        |  1 +
 mm/slab_common.c |  1 +
 mm/slob.c        |  4 ++++
 mm/slub.c        | 38 +++++++++++++++++---------------------
 5 files changed, 29 insertions(+), 27 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 0c1c67c..3282316 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2391,7 +2391,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
 	err = setup_cpu_cache(cachep, gfp);
 	if (err) {
-		__kmem_cache_shutdown(cachep);
+		__kmem_cache_release(cachep);
 		return err;
 	}
 
@@ -2545,12 +2545,13 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
+	return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
+{
 	int i;
 	struct kmem_cache_node *n;
-	int rc = __kmem_cache_shrink(cachep, false);
-
-	if (rc)
-		return rc;
 
 	for_each_online_cpu(i)
 	    kfree(cachep->array[i]);
@@ -2564,7 +2565,6 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 			kfree(n);
 		}
 	}
-	return 0;
 }
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
index 5f6c6a2..57de63f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -125,6 +125,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
 #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
 int __kmem_cache_shrink(struct kmem_cache *, bool);
 void slab_kmem_cache_release(struct kmem_cache *);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9bae922..18226a6 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -503,6 +503,7 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
 
 void slab_kmem_cache_release(struct kmem_cache *s)
 {
+	__kmem_cache_release(s);
 	destroy_memcg_params(s);
 	kfree(s->name);
 	kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index abe2693..d377c61 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -620,6 +620,10 @@ int __kmem_cache_shutdown(struct kmem_cache *c)
 	return 0;
 }
 
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
 int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
 {
 	return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 7a4ad43..80c7b79 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1525,18 +1525,12 @@ static inline void add_partial(struct kmem_cache_node *n,
 	__add_partial(n, page, tail);
 }
 
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
-	list_del(&page->lru);
-	n->nr_partial--;
-}
-
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	__remove_partial(n, page);
+	list_del(&page->lru);
+	n->nr_partial--;
 }
 
 /*
@@ -2938,6 +2932,12 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 	}
 }
 
+void __kmem_cache_release(struct kmem_cache *s)
+{
+	free_percpu(s->cpu_slab);
+	free_kmem_cache_nodes(s);
+}
+
 static int init_kmem_cache_nodes(struct kmem_cache *s)
 {
 	int node;
@@ -3198,28 +3198,31 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because a sysfs file might still access the partial list after shutdown.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
 	struct page *page, *h;
 
+	BUG_ON(irqs_disabled());
+	spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-			"Objects remaining in %s on kmem_cache_close()");
+			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
+	spin_unlock_irq(&n->list_lock);
 }
 
 /*
  * Release all resources used by a slab cache.
  */
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
 	int node;
 
@@ -3232,16 +3235,9 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
-	free_percpu(s->cpu_slab);
-	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
-	return kmem_cache_close(s);
-}
-
 /********************************************************************
  *		Kmalloc subsystem
  *******************************************************************/
@@ -3850,7 +3846,7 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 	memcg_propagate_slab_attrs(s);
 	err = sysfs_slab_add(s);
 	if (err)
-		kmem_cache_close(s);
+		__kmem_cache_release(s);
 
 	return err;
 }

