[Devel] [PATCH RHEL7 COMMIT] mm/memcg: Use per-cpu stock charges for ->kmem and ->cache counters
Vasily Averin
vvs at virtuozzo.com
Tue Jul 21 17:59:50 MSK 2020
The commit is pushed to "branch-rh7-3.10.0-1127.10.1.vz7.162.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1127.10.1.vz7.162.13
------>
commit ef2a900bbfcb696536f14020f71112f966fd67c4
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date: Tue Jul 21 17:59:50 2020 +0300
mm/memcg: Use per-cpu stock charges for ->kmem and ->cache counters
Currently we use per-cpu stocks to precharge the ->memory and ->memsw
counters. Do the same for the ->kmem and ->cache counters to decrease
contention on them as well.
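For background, a charge is first served from a per-cpu "stock" that was
prefilled by a single batched charge of the shared page counter, so the
common path touches no shared cachelines. A minimal user-space sketch of
that idea follows (hypothetical names, single cpu, no preemption or
locking; the real code is in mm/memcontrol.c below):

/*
 * Toy model of the per-cpu stock fast path.  A charge is served from
 * the cpu-local stock when possible; only when the stock runs dry do
 * we touch the shared counter, and then we overcharge it by BATCH
 * pages and park the surplus locally for later charges.
 */
#include <stdbool.h>
#include <stdio.h>

#define BATCH 32U

static unsigned long shared_charged;    /* shared counter: contended */

struct stock {
        unsigned int nr_pages;          /* cpu-local precharge */
};

static bool consume_stock(struct stock *s, unsigned int nr)
{
        if (s->nr_pages < nr)
                return false;
        s->nr_pages -= nr;              /* fast path: no shared access */
        return true;
}

static void charge(struct stock *s, unsigned int nr)
{
        if (consume_stock(s, nr))
                return;
        shared_charged += BATCH;        /* slow path: one shared update */
        s->nr_pages += BATCH - nr;      /* park the surplus locally */
}

int main(void)
{
        struct stock cpu0 = { 0 };

        for (int i = 0; i < 100; i++)
                charge(&cpu0, 1);
        /* 100 one-page charges cost only 4 updates of the shared counter */
        printf("shared updates: %lu\n", shared_charged / BATCH);
        return 0;
}

Before this patch only ->memory/->memsw charges went through the stock;
->kmem and ->cache were charged on the shared counters every time.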
https://jira.sw.ru/browse/PSBM-101300
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
mm/memcontrol.c | 71 +++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 46 insertions(+), 25 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 15cd07144d5a0..fa7b2cad93add 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2811,6 +2811,8 @@ void mem_cgroup_update_page_stat(struct page *page,
struct memcg_stock_pcp {
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
+ unsigned int cache_nr_pages;
+ unsigned int kmem_nr_pages;
struct work_struct work;
unsigned long flags;
#define FLUSHING_CACHED_CHARGE 0
@@ -2829,7 +2831,8 @@ static DEFINE_MUTEX(percpu_charge_mutex);
*
* returns true if successful, false otherwise.
*/
-static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
+ bool cache, bool kmem)
{
struct memcg_stock_pcp *stock;
bool ret = false;
@@ -2838,9 +2841,19 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
return ret;
stock = &get_cpu_var(memcg_stock);
- if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
- stock->nr_pages -= nr_pages;
- ret = true;
+ if (memcg == stock->cached) {
+ if (cache && stock->cache_nr_pages >= nr_pages) {
+ stock->cache_nr_pages -= nr_pages;
+ ret = true;
+ }
+ if (kmem && stock->kmem_nr_pages >= nr_pages) {
+ stock->kmem_nr_pages -= nr_pages;
+ ret = true;
+ }
+ if (!cache && !kmem && stock->nr_pages >= nr_pages) {
+ stock->nr_pages -= nr_pages;
+ ret = true;
+ }
}
put_cpu_var(memcg_stock);
return ret;
@@ -2852,12 +2865,20 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
static void drain_stock(struct memcg_stock_pcp *stock)
{
struct mem_cgroup *old = stock->cached;
+ unsigned long nr_pages = stock->nr_pages + stock->cache_nr_pages + stock->kmem_nr_pages;
+
+ if (stock->cache_nr_pages)
+ page_counter_uncharge(&old->cache, stock->cache_nr_pages);
+ if (stock->kmem_nr_pages)
+ page_counter_uncharge(&old->kmem, stock->kmem_nr_pages);
- if (stock->nr_pages) {
- page_counter_uncharge(&old->memory, stock->nr_pages);
+ if (nr_pages) {
+ page_counter_uncharge(&old->memory, nr_pages);
if (do_swap_account)
- page_counter_uncharge(&old->memsw, stock->nr_pages);
+ page_counter_uncharge(&old->memsw, nr_pages);
stock->nr_pages = 0;
+ stock->kmem_nr_pages = 0;
+ stock->cache_nr_pages = 0;
}
stock->cached = NULL;
}
@@ -2888,7 +2909,8 @@ static void __init memcg_stock_init(void)
* Cache charges(val) to local per_cpu area.
* This will be consumed by consume_stock() function, later.
*/
-static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages,
+ bool cache, bool kmem)
{
struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
@@ -2896,7 +2918,13 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
drain_stock(stock);
stock->cached = memcg;
}
- stock->nr_pages += nr_pages;
+
+ if (cache)
+ stock->cache_nr_pages += nr_pages;
+ else if (kmem)
+ stock->kmem_nr_pages += nr_pages;
+ else
+ stock->nr_pages += nr_pages;
put_cpu_var(memcg_stock);
}
@@ -2917,7 +2945,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
struct mem_cgroup *memcg;
memcg = stock->cached;
- if (!memcg || !stock->nr_pages)
+ if (!memcg || !(stock->nr_pages + stock->kmem_nr_pages + stock->cache_nr_pages))
continue;
if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
continue;
@@ -3108,21 +3136,13 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
bool drained = false;
if (mem_cgroup_is_root(memcg))
- goto done;
+ return 0;
retry:
flags = 0;
- if (consume_stock(memcg, nr_pages)) {
- if (kmem_charge && !page_counter_try_charge(
- &memcg->kmem, nr_pages, &counter)) {
- refill_stock(memcg, nr_pages);
- goto charge;
- }
-
+ if (consume_stock(memcg, nr_pages, cache_charge, kmem_charge))
goto done;
- }
-charge:
mem_over_limit = NULL;
if (page_counter_try_charge(&memcg->memory, batch, &counter)) {
if (do_swap_account && !page_counter_try_charge(
@@ -3135,7 +3155,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
mem_over_limit = mem_cgroup_from_counter(counter, memory);
if (!mem_over_limit && kmem_charge) {
- if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
+ if (!page_counter_try_charge(&memcg->kmem, batch, &counter)) {
flags |= MEM_CGROUP_RECLAIM_KMEM;
mem_over_limit = mem_cgroup_from_counter(counter, kmem);
page_counter_uncharge(&memcg->memory, batch);
@@ -3264,12 +3284,13 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge
goto bypass;
}
- if (batch > nr_pages)
- refill_stock(memcg, batch - nr_pages);
-done:
if (cache_charge)
- page_counter_charge(&memcg->cache, nr_pages);
+ page_counter_charge(&memcg->cache, batch);
+
+ if (batch > nr_pages)
+ refill_stock(memcg, batch - nr_pages, cache_charge, kmem_charge);
+done:
/*
* If the hierarchy is above the normal consumption range, schedule
* reclaim on returning to userland. We can perform reclaim here
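With this change the stock helpers take two flags (cache first, then kmem)
selecting which counter a precharge belongs to. Illustrative call shapes,
not complete call sites:

        consume_stock(memcg, nr_pages, false, false);   /* plain ->memory charge */
        consume_stock(memcg, nr_pages, false, true);    /* kmem charge */
        refill_stock(memcg, batch - nr_pages, cache_charge, kmem_charge);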