[Devel] [PATCH RHEL7 COMMIT] mm/workingset: fix workingset_activation() with memcgroups enabled

Konstantin Khorenko khorenko at virtuozzo.com
Tue May 21 18:34:57 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.12.2.vz7.96.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.12.2.vz7.96.1
------>
commit d58a73c5986e66b0b9ee2ef6da500d3d43d92a45
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Tue May 21 18:34:55 2019 +0300

    mm/workingset: fix workingset_activation() with memcgroups enabled
    
    workingset_activation() mistakenly does nothing when memory cgroups
    are enabled: its mem_cgroup_disabled() check is inverted. Commit
    f6a8b015027e ("ms/mm: workingset: per-cgroup cache thrash detection")
    also changed mem_cgroup_begin_update_page_stat() to return a memcg,
    but didn't get that right either: a memcg is returned only while a
    charge move is in flight, and NULL otherwise. Instead of messing
    with mem_cgroup_begin_update_page_stat(), obtain the memcg from the
    page directly in workingset_activation().
    
    Fixes: f6a8b015027e ("ms/mm: workingset: per-cgroup cache thrash detection")
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
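Reviewer note: a condensed before-image of the broken call site, pieced
together from the mm/workingset.c hunk below (elisions marked with "...",
kernel context assumed, not a standalone example):

    void workingset_activation(struct page *page)
    {
            struct mem_cgroup *memcg;
            ...
            /* Inverted check: bails out whenever memcgs are *enabled*. */
            if (!mem_cgroup_disabled())
                    return;

            /* Even past that check, the old helper returned a memcg only
             * while a charge move was in flight, and NULL otherwise, so
             * the increment below was skipped almost always. */
            memcg = mem_cgroup_begin_update_page_stat(page, &locked, &flags);
            if (!memcg)
                    return;

            lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
            atomic_long_inc(&lruvec->inactive_age);
    }
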
 include/linux/memcontrol.h | 10 +++++-----
 mm/memcontrol.c            |  9 +++++----
 mm/workingset.c            | 17 ++++++++++-------
 3 files changed, 20 insertions(+), 16 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 575584dc1651..aa8cef097055 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -181,21 +181,21 @@ static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 	css_put(mem_cgroup_css(memcg));
 }
 
-struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
+void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
 					 unsigned long *flags);
 
 extern atomic_t memcg_moving;
 
-static inline struct mem_cgroup *mem_cgroup_begin_update_page_stat(struct page *page,
+static inline void mem_cgroup_begin_update_page_stat(struct page *page,
 					bool *locked, unsigned long *flags)
 {
 	if (mem_cgroup_disabled())
-		return NULL;
+		return;
+
 	rcu_read_lock();
 	*locked = false;
 	if (atomic_read(&memcg_moving))
-		return __mem_cgroup_begin_update_page_stat(page, locked, flags);
-	return NULL;
+		__mem_cgroup_begin_update_page_stat(page, locked, flags);
 }
 
 void __mem_cgroup_end_update_page_stat(struct page *page,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cd31ec167f13..91fb5ee896f5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2703,7 +2703,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
  * account and taking the move_lock in the slowpath.
  */
 
-struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page,
+void __mem_cgroup_begin_update_page_stat(struct page *page,
 				bool *locked, unsigned long *flags)
 {
 	struct mem_cgroup *memcg;
@@ -2713,7 +2713,8 @@ struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page,
 again:
 	memcg = pc->mem_cgroup;
 	if (unlikely(!memcg || !PageCgroupUsed(pc)))
-		return NULL;
+		return;
+
 	/*
 	 * If this memory cgroup is not under account moving, we don't
 	 * need to take move_lock_mem_cgroup(). Because we already hold
@@ -2721,7 +2722,7 @@ struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page,
 	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
 	 */
 	if (!mem_cgroup_stolen(memcg))
-		return NULL;
+		return;
 
 	move_lock_mem_cgroup(memcg, flags);
 	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
@@ -2729,7 +2730,7 @@ struct mem_cgroup *__mem_cgroup_begin_update_page_stat(struct page *page,
 		goto again;
 	}
 	*locked = true;
-	return memcg;
+	return;
 }
 
 void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
diff --git a/mm/workingset.c b/mm/workingset.c
index 1d7bdbf7a5d6..1598d2b02314 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -315,11 +315,17 @@ bool workingset_refault(void *shadow)
  */
 void workingset_activation(struct page *page)
 {
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg = NULL;
+	struct page_cgroup *pc;
 	struct lruvec *lruvec;
 	bool locked;
 	unsigned long flags;
 
+	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+	pc = lookup_page_cgroup(page);
+	if (likely(PageCgroupUsed(pc)))
+		memcg = pc->mem_cgroup;
+
 	/*
 	 * Filter non-memcg pages here, e.g. unmap can call
 	 * mark_page_accessed() on VDSO pages.
@@ -327,15 +333,12 @@ void workingset_activation(struct page *page)
 	 * XXX: See workingset_refault() - this should return
 	 * root_mem_cgroup even for !CONFIG_MEMCG.
 	 */
-	if (!mem_cgroup_disabled())
-		return;
-
-	memcg = mem_cgroup_begin_update_page_stat(page, &locked, &flags);
-	if (!memcg)
-		return;
+	if (!mem_cgroup_disabled() && !memcg)
+		goto out;
 
 	lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
 	atomic_long_inc(&lruvec->inactive_age);
+out:
 	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
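After this patch, mem_cgroup_begin_update_page_stat() returns void and
only takes the locks (rcu_read_lock() whenever memcgs are enabled, the
per-memcg move_lock only while a charge move is in flight); a caller
that needs the memcg looks it up from the page itself. Condensed from
the post-patch mm/workingset.c above:

    mem_cgroup_begin_update_page_stat(page, &locked, &flags);
    pc = lookup_page_cgroup(page);
    if (likely(PageCgroupUsed(pc)))
            memcg = pc->mem_cgroup;
    /* ... update per-memcg state, e.g. lruvec->inactive_age ... */
    mem_cgroup_end_update_page_stat(page, &locked, &flags);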