[Devel] [PATCH RHEL7 COMMIT] mm/memcg: reclaim memory.cache.limit_in_bytes from background

Konstantin Khorenko khorenko at virtuozzo.com
Mon Jul 8 13:28:40 MSK 2019


The commit is pushed to "vz7.96.12" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.12.2.vz7.96.12
------>
commit 34baf56e9e17a60c483d8751a7ae2c821807bdf5
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Mon Jul 8 13:28:38 2019 +0300

    mm/memcg: reclaim memory.cache.limit_in_bytes from background
    
    Reclaiming memory above memory.cache.limit_in_bytes always in direct
    reclaim mode adds too much cost for vstorage. Instead of direct
    reclaim, allow overflowing memory.cache.limit_in_bytes but launch
    the reclaim in a background task.
    
    https://pmc.acronis.com/browse/VSTOR-24395
    https://jira.sw.ru/browse/PSBM-94761
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 mm/memcontrol.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0e6911b2dcc3..0357a8c341c4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3002,11 +3002,15 @@ static void reclaim_high(struct mem_cgroup *memcg,
 			 unsigned int nr_pages,
 			 gfp_t gfp_mask)
 {
+
 	do {
-		if (page_counter_read(&memcg->memory) <= memcg->high)
-			continue;
+		if (page_counter_read(&memcg->memory) > memcg->high)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, 0);
+
+		if (page_counter_read(&memcg->cache) > memcg->cache.limit)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask,
+						MEM_CGROUP_RECLAIM_NOSWAP);
 
-		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, 0);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
 
@@ -3067,11 +3071,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			goto charge;
 		}
 
-		if (cache_charge && page_counter_try_charge(
-				&memcg->cache, nr_pages, &counter)) {
-			refill_stock(memcg, nr_pages);
-			goto charge;
-		}
 		goto done;
 	}
 
@@ -3097,19 +3096,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		}
 	}
 
-	if (!mem_over_limit && cache_charge) {
-		if (!page_counter_try_charge(&memcg->cache, nr_pages, &counter))
-			goto done_restock;
-
-		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
-		mem_over_limit = mem_cgroup_from_counter(counter, cache);
-		page_counter_uncharge(&memcg->memory, batch);
-		if (do_swap_account)
-			page_counter_uncharge(&memcg->memsw, batch);
-		if (kmem_charge)
-			page_counter_uncharge(&memcg->kmem, batch);
-	}
-
 	if (!mem_over_limit)
 		goto done_restock;
 
@@ -3222,8 +3208,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		page_counter_uncharge(&memcg->memory, batch);
 		if (do_swap_account)
 			page_counter_uncharge(&memcg->memsw, batch);
-		if (cache_charge)
-			page_counter_uncharge(&memcg->cache, nr_pages);
 		if (kmem_charge) {
 			WARN_ON_ONCE(1);
 			page_counter_uncharge(&memcg->kmem, nr_pages);
@@ -3235,6 +3219,9 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
 done:
+	if (cache_charge)
+		page_counter_charge(&memcg->cache, nr_pages);
+
 	/*
 	 * If the hierarchy is above the normal consumption range, schedule
 	 * reclaim on returning to userland.  We can perform reclaim here
@@ -3254,7 +3241,11 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			current->memcg_nr_pages_over_high += batch;
 			set_notify_resume(current);
 			break;
+		} else if (page_counter_read(&memcg->cache) > memcg->cache.limit) {
+			if (!work_pending(&memcg->high_work))
+				schedule_work(&memcg->high_work);
 		}
+
 	} while ((memcg = parent_mem_cgroup(memcg)));
 
 	return 0;



More information about the Devel mailing list