[Devel] [PATCH RHEL7 COMMIT] ms/mm: memcontrol: use vmalloc fallback for large kmem memcg arrays

Konstantin Khorenko <khorenko@virtuozzo.com>
Tue Nov 7 12:59:18 MSK 2017


The commit is pushed to "branch-rh7-3.10.0-693.1.1.vz7.37.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-693.1.1.vz7.37.22
------>
commit 82946e31776cc710b9b79c9dfacc0c74405c9e0c
Author: Johannes Weiner <hannes@cmpxchg.org>
Date:   Tue Nov 7 12:59:17 2017 +0300

    ms/mm: memcontrol: use vmalloc fallback for large kmem memcg arrays
    
    commit f80c7dab95a1f0f968acbafe4426ee9525b6f6ab upstream.
    
    For quick per-memcg indexing, slab caches and list_lru structures
    maintain linear arrays of descriptors. As the number of concurrent
    memory cgroups in the system goes up, this requires large contiguous
    allocations (8k cgroups = order-5, 16k cgroups = order-6 etc.) for
    every existing slab cache and list_lru, which can easily fail on
    loaded systems. E.g.:
    
    mkdir: page allocation failure: order:5, mode:0x14040c0(GFP_KERNEL|__GFP_COMP), nodemask=(null)
    CPU: 1 PID: 6399 Comm: mkdir Not tainted 4.13.0-mm1-00065-g720bbe532b7c-dirty #481
    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-20170228_101828-anatol 04/01/2014
    Call Trace:
     dump_stack+0x70/0x9d
     warn_alloc+0xd6/0x170
     ? __alloc_pages_direct_compact+0x4c/0x110
     __alloc_pages_nodemask+0xf50/0x1430
     ? __lock_acquire+0xd19/0x1360
     ? memcg_update_all_list_lrus+0x2e/0x2e0
     ? __mutex_lock+0x7c/0x950
     ? memcg_update_all_list_lrus+0x2e/0x2e0
     alloc_pages_current+0x60/0xc0
     kmalloc_order_trace+0x29/0x1b0
     __kmalloc+0x1f4/0x320
     memcg_update_all_list_lrus+0xca/0x2e0
     mem_cgroup_css_alloc+0x612/0x670
     cgroup_apply_control_enable+0x19e/0x360
     cgroup_mkdir+0x322/0x490
     kernfs_iop_mkdir+0x55/0x80
     vfs_mkdir+0xd0/0x120
     SyS_mkdirat+0x6c/0xe0
     SyS_mkdir+0x14/0x20
     entry_SYSCALL_64_fastpath+0x18/0xad
    RIP: 0033:0x7f9ff36cee87
    RSP: 002b:00007ffc7612d758 EFLAGS: 00000202 ORIG_RAX: 0000000000000053
    RAX: ffffffffffffffda RBX: 00007ffc7612da48 RCX: 00007f9ff36cee87
    RDX: 00000000000001ff RSI: 00000000000001ff RDI: 00007ffc7612de86
    RBP: 0000000000000002 R08: 00000000000001ff R09: 0000000000401db0
    R10: 00000000000001e2 R11: 0000000000000202 R12: 0000000000000000
    R13: 00007ffc7612da40 R14: 0000000000000000 R15: 0000000000000000
    Mem-Info:
    active_anon:2965 inactive_anon:19 isolated_anon:0
     active_file:100270 inactive_file:98846 isolated_file:0
     unevictable:0 dirty:0 writeback:0 unstable:0
     slab_reclaimable:7328 slab_unreclaimable:16402
     mapped:771 shmem:52 pagetables:278 bounce:0
     free:13718 free_pcp:0 free_cma:0
    
    This output is from an artificial reproducer, but we have repeatedly
    observed order-7 failures in production in the Facebook fleet. These
    systems become useless as they cannot run more jobs, even though there
    is plenty of memory to allocate 128 individual pages.
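
    For scale, assuming 8-byte pointers and 4 KiB pages: 8192 cgroup ids
    need 8192 * 8 = 64 KiB of pointer array, and the struct header pushes
    the allocation past 16 pages, which the allocator rounds up to an
    order-5 (128 KiB) block; 16384 ids end up in an order-6 block the
    same way.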
    
    Use kvmalloc and kvzalloc to fall back to vmalloc space if these
    arrays prove too large to be allocated as physically contiguous
    memory.
    
    Link: http://lkml.kernel.org/r/20170918184919.20644-1-hannes@cmpxchg.org
    Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
    Reviewed-by: Josef Bacik <jbacik@fb.com>
    Acked-by: Michal Hocko <mhocko@suse.com>
    Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    
    https://jira.sw.ru/browse/PSBM-76752
    Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/list_lru.c    | 17 +++++++++++------
 mm/slab_common.c | 20 ++++++++++++++------
 2 files changed, 25 insertions(+), 12 deletions(-)
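
For context, the fallback this change relies on looks roughly like the
sketch below. This is a simplified illustration of what kvmalloc() and
kvfree() do, not the exact kernel implementation; the size-threshold
logic and flag handling of the real functions are condensed here:

#include <linux/slab.h>    /* kmalloc, kfree, gfp_t */
#include <linux/vmalloc.h> /* vmalloc, vfree */
#include <linux/mm.h>      /* is_vmalloc_addr */

void *kvmalloc(size_t size, gfp_t flags)
{
	void *p;

	/*
	 * Try a physically contiguous allocation first, but don't retry
	 * hard or warn on failure: vmalloc() is an acceptable fallback
	 * for these arrays.
	 */
	p = kmalloc(size, flags | __GFP_NOWARN | __GFP_NORETRY);
	if (p)
		return p;

	/* Fall back to virtually contiguous memory. */
	return vmalloc(size);
}

void kvfree(const void *p)
{
	/* Free through whichever allocator provided the memory. */
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}

This is also why the patch replaces kfree_rcu() with explicit call_rcu()
callbacks (free_list_lru_memcg(), free_memcg_params()): kfree_rcu()
ultimately frees with kfree(), which cannot release a vmalloc()-ed
array, so the deferred frees must go through kvfree() instead.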

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5adc6621..91dccc1 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -322,13 +322,13 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
 	struct list_lru_memcg *memcg_lrus;
 	int size = memcg_nr_cache_ids;
 
-	memcg_lrus = kmalloc(sizeof(*memcg_lrus) +
+	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
 			     size * sizeof(void *), GFP_KERNEL);
 	if (!memcg_lrus)
 		return -ENOMEM;
 
 	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
-		kfree(memcg_lrus);
+		kvfree(memcg_lrus);
 		return -ENOMEM;
 	}
 	rcu_assign_pointer(nlru->memcg_lrus, memcg_lrus);
@@ -346,7 +346,12 @@ static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
 	 */
 	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus, true);
 	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
-	kfree(memcg_lrus);
+	kvfree(memcg_lrus);
+}
+
+static void free_list_lru_memcg(struct rcu_head *head)
+{
+	kvfree(container_of(head, struct list_lru_memcg, rcu));
 }
 
 static int memcg_update_list_lru_node(struct list_lru_node *nlru,
@@ -359,12 +364,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
 
 	/* list_lrus_mutex is held, nobody can change memcg_lrus. Silence RCU */
 	old = rcu_dereference_check(nlru->memcg_lrus, true);
-	new = kmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
+	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
 
 	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
-		kfree(new);
+		kvfree(new);
 		return -ENOMEM;
 	}
 
@@ -381,7 +386,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
 	rcu_assign_pointer(nlru->memcg_lrus, new);
 	spin_unlock_irq(&nlru->lock);
 
-	kfree_rcu(old, rcu);
+	call_rcu(&old->rcu, free_list_lru_memcg);
 	return 0;
 }
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b24d35d..049f155 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -133,9 +133,9 @@ static int init_memcg_params(struct kmem_cache *s,
 	if (!memcg_nr_cache_ids)
 		return 0;
 
-	arr = kzalloc(sizeof(struct memcg_cache_array) +
-		      memcg_nr_cache_ids * sizeof(void *),
-		      GFP_KERNEL);
+	arr = kvzalloc(sizeof(struct memcg_cache_array) +
+		       memcg_nr_cache_ids * sizeof(void *),
+		       GFP_KERNEL);
 	if (!arr)
 		return -ENOMEM;
 
@@ -146,7 +146,15 @@ static int init_memcg_params(struct kmem_cache *s,
 static void destroy_memcg_params(struct kmem_cache *s)
 {
 	if (is_root_cache(s))
-		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
+}
+
+static void free_memcg_params(struct rcu_head *rcu)
+{
+	struct memcg_cache_array *old;
+
+	old = container_of(rcu, struct memcg_cache_array, rcu);
+	kvfree(old);
 }
 
 static int update_memcg_params(struct kmem_cache *s, int new_array_size)
@@ -156,7 +164,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 	if (!is_root_cache(s))
 		return 0;
 
-	new = kzalloc(sizeof(struct memcg_cache_array) +
+	new = kvzalloc(sizeof(struct memcg_cache_array) +
 		      new_array_size * sizeof(void *), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
@@ -169,7 +177,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size)
 
 	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
 	if (old)
-		kfree_rcu(old, rcu);
+		call_rcu(&old->rcu, free_memcg_params);
 	return 0;
 }
 

