[Devel] [PATCH RHEL7 COMMIT] ms/memcg: issue memory.high reclaim after refilling percpu stock

Konstantin Khorenko khorenko at virtuozzo.com
Fri Aug 28 03:22:20 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-229.7.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-229.7.2.vz7.6.3
------>
commit c315808e33a89086d0dac4624c1fa6f4fe1f8051
Author: Vladimir Davydov <vdavydov at parallels.com>
Date:   Fri Aug 28 14:22:20 2015 +0400

    ms/memcg: issue memory.high reclaim after refilling percpu stock
    
    Currently, we dive into memory.high reclaim before refilling percpu
    stock. As a result, if we successfully charge a batch for a percpu stock
    while exceeding memory.high, others won't be able to use it until we
    finish and will probably have to reclaim themselves, which may lead to
    overreclaim. This patch therefore moves memory.high reclaim after
    refilling stocks. This is how it works upstream.
    
    I haven't seen any negative effects caused by this backport mistake, but
    let's stick to the mainstream behavior anyway.
    
    Fixes: 4038cd0e029dd ("ms/memcg: port memory.high")
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 mm/memcontrol.c | 35 +++++++++++++++++------------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 37e81d3..5f3e0ac 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2730,10 +2730,10 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 
 	if (likely(!ret)) {
 		if (!do_swap_account)
-			goto done;
+			return CHARGE_OK;
 		ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
 		if (likely(!ret))
-			goto done;
+			return CHARGE_OK;
 
 		res_counter_uncharge(&memcg->res, csize);
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
@@ -2790,21 +2790,6 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		return CHARGE_OOM_DIE;
 
 	return CHARGE_RETRY;
-
-done:
-	if (!(gfp_mask & __GFP_WAIT))
-		goto out;
-	/*
-	 * If the hierarchy is above the normal consumption range,
-	 * make the charging task trim their excess contribution.
-	 */
-	do {
-		if (res_counter_read_u64(&memcg->res, RES_USAGE) <= memcg->high)
-			continue;
-		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, false);
-	} while ((memcg = parent_mem_cgroup(memcg)));
-out:
-	return CHARGE_OK;
 }
 
 /*
@@ -2836,7 +2821,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 {
 	unsigned int batch = max(CHARGE_BATCH, nr_pages);
 	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
-	struct mem_cgroup *memcg = NULL;
+	struct mem_cgroup *memcg = NULL, *iter;
 	int ret;
 
 	/*
@@ -2950,6 +2935,20 @@ again:
 
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+
+	/*
+	 * If the hierarchy is above the normal consumption range,
+	 * make the charging task trim their excess contribution.
+	 */
+	iter = memcg;
+	do {
+		if (!(gfp_mask & __GFP_WAIT))
+			break;
+		if (res_counter_read_u64(&iter->res, RES_USAGE) <= iter->high)
+			continue;
+		try_to_free_mem_cgroup_pages(iter, nr_pages, gfp_mask, false);
+	} while ((iter = parent_mem_cgroup(iter)));
+
 	css_put(&memcg->css);
 done:
 	*ptr = memcg;



More information about the Devel mailing list