[Devel] [PATCH RHEL7 COMMIT] ms/slub: Make cpu partial slab support configurable

Konstantin Khorenko khorenko at virtuozzo.com
Thu Jan 10 17:16:25 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.1.3.vz7.83.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.1.3.vz7.83.4
------>
commit e0e16ab86019812516360bb73d1c836b74069997
Author: Joonsoo Kim <iamjoonsoo.kim at lge.com>
Date:   Thu Jan 10 17:16:23 2019 +0300

    ms/slub: Make cpu partial slab support configurable
    
    CPU partial support can introduce a level of indeterminism that is
    not wanted in certain contexts (such as a realtime kernel). Make it
    configurable.
    
    This patch is based on Christoph Lameter's "slub: Make cpu partial slab
    support configurable V2".
    
    Acked-by: Christoph Lameter <cl at linux.com>
    Signed-off-by: Joonsoo Kim <iamjoonsoo.kim at lge.com>
    Signed-off-by: Pekka Enberg <penberg at kernel.org>
    
    https://jira.sw.ru/browse/PSBM-83199
    [ Setting SLUB_CPU_PARTIAL=n saves us about 1G in the dvd-store test ]
    (cherry picked from commit 345c905d13a4ec9f774b6b4bc038fe4aef26cced)
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 init/Kconfig | 11 +++++++++++
 mm/slub.c    | 28 +++++++++++++++++++++-------
 2 files changed, 32 insertions(+), 7 deletions(-)
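
[ For reference, not part of the patch itself: the setting used for the
  dvd-store measurement mentioned above is the ordinary Kconfig toggle
  added below; in a .config fragment the disabled case would appear as

      # CONFIG_SLUB_CPU_PARTIAL is not set

  while the default stays CONFIG_SLUB_CPU_PARTIAL=y, as per "default y"
  in the init/Kconfig hunk. ]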

diff --git a/init/Kconfig b/init/Kconfig
index 707e56a6c511..7edff968e2b6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1641,6 +1641,17 @@ config SLOB
 
 endchoice
 
+config SLUB_CPU_PARTIAL
+	default y
+	depends on SLUB
+	bool "SLUB per cpu partial cache"
+	help
+	  Per cpu partial caches accelerate object allocation and freeing
+	  that is local to a processor at the price of more indeterminism
+	  in the latency of the free. On overflow these caches will be cleared
+	  which requires the taking of locks that may cause latency spikes.
+	  Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
 	depends on EXPERT && !MMU
diff --git a/mm/slub.c b/mm/slub.c
index 7e56d25ba399..e7dd99b09681 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -132,6 +132,15 @@ static inline void *fixup_red_left(struct kmem_cache *s, void *p)
 	return p;
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	return !kmem_cache_debug(s);
+#else
+	return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1725,7 +1734,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
-		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+		if (!kmem_cache_has_cpu_partial(s)
+			|| available > s->cpu_partial / 2)
 			break;
 
 	}
@@ -2039,6 +2049,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freel
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
@@ -2093,6 +2104,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+#endif
 }
 
 /*
@@ -2106,6 +2118,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
 	int pages;
 	int pobjects;
@@ -2152,6 +2165,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		local_irq_restore(flags);
 	}
 	preempt_enable();
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2678,8 +2692,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse -= cnt;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (!kmem_cache_debug(s) && !prior) {
-
+			if (kmem_cache_has_cpu_partial(s) && !prior) {
 				/*
 				 * Slab was on no list before and will be partially empty
 				 * We can defer the list move and instead freeze it.
@@ -2733,8 +2746,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (kmem_cache_debug(s) && unlikely(!prior)) {
-		remove_full(s, n, page);
+	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+		if (kmem_cache_debug(s))
+			remove_full(s, n, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -3441,7 +3455,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 *    per node list when we run out of per cpu objects. We only fetch 50%
 	 *    to keep some capacity around for frees.
 	 */
-	if (kmem_cache_debug(s))
+	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
 	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
@@ -4921,7 +4935,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
-	if (objects && kmem_cache_debug(s))
+	if (objects && !kmem_cache_has_cpu_partial(s))
 		return -EINVAL;
 
 	s->cpu_partial = objects;
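
[ For illustration only, not kernel code: the patch keeps the callers of
  unfreeze_partials() and put_cpu_partial() unconditional and instead
  compiles the function bodies away under !CONFIG_SLUB_CPU_PARTIAL, with
  kmem_cache_has_cpu_partial() collapsing to false.  A minimal standalone
  C sketch of that pattern, with stand-in names and bodies, could look
  like this: ]

#include <stdbool.h>
#include <stdio.h>

/* Uncomment to emulate building with CONFIG_SLUB_CPU_PARTIAL=y: */
/* #define CONFIG_SLUB_CPU_PARTIAL 1 */

static bool has_cpu_partial(void)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        return true;    /* stands in for !kmem_cache_debug(s) in the patch */
#else
        return false;   /* feature compiled out entirely */
#endif
}

static void put_partial(int page)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        printf("queueing page %d on a per-cpu partial list\n", page);
#else
        (void)page;     /* body compiles away when the option is off */
#endif
}

int main(void)
{
        /* Callers stay identical either way; only the bodies differ. */
        if (has_cpu_partial())
                put_partial(42);
        else
                printf("per-cpu partial lists compiled out\n");
        return 0;
}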


