[Devel] [PATCH vz8.4 1/4] ve/memcg: Fix /proc/meminfo virtualization (eliminate double recursion)

Konstantin Khorenko khorenko at virtuozzo.com
Thu Jul 22 17:23:53 MSK 2021


This patch partially reverts commit 47f1b6c1d8e5 ("ve/memcg: Make
virtualization of /proc/meminfo view inside CT recursive")

In vz8 we have both memcg->vmstats (recursive) and
memcg->vmstats_local (non-recursive). memcg_page_state() already
returns the recursive value, so mem_page_state_recursive() introduced
by the reverted commit sums recursive counters over the whole subtree
and counts every descendant twice. Revert that logic, but keep the
rest of the commit (see the sketch below).
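
A minimal user-space sketch of the double counting (illustration only,
not part of the patch; the two-level hierarchy and the struct fields
below are simplified stand-ins for the kernel's memcg->vmstats /
memcg->vmstats_local counters):

#include <stdio.h>

/* per-cgroup counters, modelled after vz8 memcg accounting */
struct cgroup {
	long vmstats;		/* recursive: self + all descendants */
	long vmstats_local;	/* non-recursive: self only */
};

int main(void)
{
	/* child has 100 file pages charged, parent has none of its own */
	struct cgroup child  = { .vmstats = 100, .vmstats_local = 100 };
	struct cgroup parent = { .vmstats = 100, .vmstats_local = 0   };

	/* what mem_page_state_recursive() effectively did: sum the
	 * already-recursive counter for every cgroup in the subtree */
	long double_counted = parent.vmstats + child.vmstats;

	/* what the patched code does: read the recursive counter once
	 * at the subtree root */
	long correct = parent.vmstats;

	printf("double-counted: %ld, correct: %ld\n", double_counted, correct);
	return 0;
}

Running it prints "double-counted: 200, correct: 100": summing the
recursive counter over the subtree reports twice the real usage, which
matches the kind of inflation the reverted helper caused in the
Container's /proc/meminfo.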

Fixes: 47f1b6c1d8e5 ("ve/memcg: Make virtualization of /proc/meminfo view inside CT recursive")

To_merge: 38a2f168a441 ("ve/proc: virtualize /proc/meminfo in a Container")

https://jira.sw.ru/browse/PSBM-131992

Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
 mm/memcontrol.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d3aa1cf20796..c1e23f0bbe91 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4151,29 +4151,17 @@ void mem_cgroup_get_nr_pages(struct mem_cgroup *memcg, unsigned long *pages)
 		pages[lru] += mem_cgroup_nr_lru_pages(memcg, BIT(lru), true);
 }
 
-static unsigned long mem_page_state_recursive(struct mem_cgroup *memcg,
-					      int idx)
-{
-	struct mem_cgroup *iter;
-	unsigned long val = 0;
-
-	for_each_mem_cgroup_tree(iter, memcg)
-		val += memcg_page_state(iter, idx);
-
-	return val;
-}
-
 void mem_cgroup_fill_meminfo(struct mem_cgroup *memcg, struct meminfo *mi)
 {
 	memset(&mi->pages, 0, sizeof(mi->pages));
 	mem_cgroup_get_nr_pages(memcg, mi->pages);
 
-	mi->slab_reclaimable = mem_page_state_recursive(memcg, NR_SLAB_RECLAIMABLE_B);
-	mi->slab_unreclaimable = mem_page_state_recursive(memcg, NR_SLAB_UNRECLAIMABLE_B);
-	mi->cached = mem_page_state_recursive(memcg, NR_FILE_PAGES);
-	mi->shmem = mem_page_state_recursive(memcg, NR_SHMEM);
-	mi->dirty_pages = mem_page_state_recursive(memcg, NR_FILE_DIRTY);
-	mi->writeback_pages = mem_page_state_recursive(memcg, NR_WRITEBACK);
+	mi->slab_reclaimable	= memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B);
+	mi->slab_unreclaimable	= memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
+	mi->cached		= memcg_page_state(memcg, NR_FILE_PAGES);
+	mi->shmem		= memcg_page_state(memcg, NR_SHMEM);
+	mi->dirty_pages		= memcg_page_state(memcg, NR_FILE_DIRTY);
+	mi->writeback_pages	= memcg_page_state(memcg, NR_WRITEBACK);
 
 	/* locked pages are accounted per zone */
 	/* mi->locked = 0; */
@@ -4184,7 +4172,7 @@ void mem_cgroup_fill_meminfo(struct mem_cgroup *memcg, struct meminfo *mi)
 	 * are not taken into account. These values reflect reservation of
 	 * physycal memory and they are not relevant for CT.
 	 */
-	mi->available = mi->si->freeram;
+	mi->available  = mi->si->freeram;
 	mi->available += mi->pages[LRU_ACTIVE_FILE] +
 			 mi->pages[LRU_INACTIVE_FILE];
 	mi->available += mi->slab_reclaimable;
-- 
2.28.0


