[Devel] [PATCH RHEL7 COMMIT] mm/memcontrol: offload offline reclaim to background work.
Konstantin Khorenko
khorenko at virtuozzo.com
Tue Mar 10 12:30:58 MSK 2020
The commit is pushed to "branch-rh7-3.10.0-1062.12.1.vz7.145.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1062.12.1.vz7.145.1
------>
commit 6c8429f981c8bbdc4f8d6badb8e209230fb75ec5
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date: Tue Mar 10 12:30:58 2020 +0300
mm/memcontrol: offload offline reclaim to background work.
Reclaiming memcg memory during offline may take a significant
amount of time. It's better to offload heavy work to decrease
cgroup_mutex held time.
https://jira.sw.ru/browse/PSBM-101639
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
v2: typo fix: s/high_work/offline_reclaim_work/
---
mm/memcontrol.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4846e7a9bf63b..7e61d1c66c753 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -316,6 +316,7 @@ struct mem_cgroup {
/* vmpressure notifications */
struct vmpressure vmpressure;
struct work_struct high_work;
+ struct work_struct offline_reclaim_work;
/*
* the counter to account for kernel memory usage.
@@ -6520,6 +6521,22 @@ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
}
EXPORT_SYMBOL(parent_mem_cgroup);
+static void offline_reclaim_func(struct work_struct *work)
+{
+ struct mem_cgroup *memcg;
+ int nr_retries = 5;
+
+ memcg = container_of(work, struct mem_cgroup, offline_reclaim_work);
+ lru_add_drain_all();
+
+ while (nr_retries && page_counter_read(&memcg->memory))
+ if (!try_to_free_mem_cgroup_pages(memcg, -1UL, GFP_KERNEL,
+ MEM_CGROUP_RECLAIM_NOSWAP))
+ nr_retries--;
+
+ lru_add_drain();
+}
+
static void __init mem_cgroup_soft_limit_tree_init(void)
{
struct mem_cgroup_tree_per_node *rtpn;
@@ -6569,6 +6586,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
INIT_LIST_HEAD(&memcg->oom_notify);
memcg->move_charge_at_immigrate = 0;
INIT_WORK(&memcg->high_work, high_work_func);
+ INIT_WORK(&memcg->offline_reclaim_work, offline_reclaim_func);
mutex_init(&memcg->thresholds_lock);
spin_lock_init(&memcg->move_lock);
vmpressure_init(&memcg->vmpressure);
@@ -6663,16 +6681,7 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
static void mem_cgroup_free_all(struct mem_cgroup *memcg)
{
- int nr_retries = 5;
-
- lru_add_drain_all();
-
- while (nr_retries && page_counter_read(&memcg->memory))
- if (!try_to_free_mem_cgroup_pages(memcg, -1UL, GFP_KERNEL,
- MEM_CGROUP_RECLAIM_NOSWAP))
- nr_retries--;
-
- lru_add_drain();
+ schedule_work(&memcg->offline_reclaim_work);
}
static void mem_cgroup_css_offline(struct cgroup *cont)
@@ -6723,6 +6732,8 @@ static void mem_cgroup_css_free(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ flush_work(&memcg->offline_reclaim_work);
+
/*
* XXX: css_offline() would be where we should reparent all
* memory to prepare the cgroup for destruction. However,
More information about the Devel mailing list