[Devel] [PATCH RHEL8 COMMIT] mm/memcg: reclaim memory.cache.limit_in_bytes from background

Konstantin Khorenko khorenko at virtuozzo.com
Mon Oct 5 12:00:58 MSK 2020


The commit is pushed to "branch-rh8-4.18.0-193.6.3.vz8.4.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-193.6.3.vz8.4.11
------>
commit 6fd293dbf5518d786f325db3b0f352385795f4bf
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Mon Oct 5 12:00:58 2020 +0300

    mm/memcg: reclaim memory.cache.limit_in_bytes from background
    
    Reclaiming memory above memory.cache.limit_in_bytes always in direct
    reclaim mode adds too much of a cost for vstorage. Instead of direct
    reclaim, allow memory.cache.limit_in_bytes to be overflowed and launch
    the reclaim in a background task.
    
    https://pmc.acronis.com/browse/VSTOR-24395
    https://jira.sw.ru/browse/PSBM-94761
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 mm/memcontrol.c | 42 ++++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 24 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ee09f5f6da6c..14727037bcd0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2211,11 +2211,16 @@ static void reclaim_high(struct mem_cgroup *memcg,
 			 unsigned int nr_pages,
 			 gfp_t gfp_mask)
 {
+
 	do {
-		if (page_counter_read(&memcg->memory) <= memcg->high)
-			continue;
-		memcg_memory_event(memcg, MEMCG_HIGH);
-		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+
+		if (page_counter_read(&memcg->memory) > memcg->high) {
+			memcg_memory_event(memcg, MEMCG_HIGH);
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+		}
+
+		if (page_counter_read(&memcg->cache) > memcg->cache.max)
+			try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, false);
 	} while ((memcg = parent_mem_cgroup(memcg)));
 }
 
@@ -2270,13 +2275,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			refill_stock(memcg, nr_pages);
 			goto charge;
 		}
-
-		if (cache_charge && !page_counter_try_charge(
-				&memcg->cache, nr_pages, &counter)) {
-			refill_stock(memcg, nr_pages);
-			goto charge;
-		}
-		return 0;
+		css_get_many(&memcg->css, batch);
+		goto done;
 	}
 
 charge:
@@ -2301,19 +2301,6 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 		}
 	}
 
-	if (!mem_over_limit && cache_charge) {
-		if (page_counter_try_charge(&memcg->cache, nr_pages, &counter))
-			goto done_restock;
-
-		may_swap = false;
-		mem_over_limit = mem_cgroup_from_counter(counter, cache);
-		page_counter_uncharge(&memcg->memory, batch);
-		if (do_memsw_account())
-			page_counter_uncharge(&memcg->memsw, batch);
-		if (kmem_charge)
-			page_counter_uncharge(&memcg->kmem, nr_pages);
-	}
-
 	if (!mem_over_limit)
 		goto done_restock;
 
@@ -2437,6 +2424,9 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+done:
+	if (cache_charge)
+		page_counter_charge(&memcg->cache, nr_pages);
 
 	/*
 	 * If the hierarchy is above the normal consumption range, schedule
@@ -2457,7 +2447,11 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
 			current->memcg_nr_pages_over_high += batch;
 			set_notify_resume(current);
 			break;
+		} else if (page_counter_read(&memcg->cache) > memcg->cache.max) {
+			if (!work_pending(&memcg->high_work))
+				schedule_work(&memcg->high_work);
 		}
+
 	} while ((memcg = parent_mem_cgroup(memcg)));
 
 	return 0;


More information about the Devel mailing list