[Devel] [PATCH rh7 v2 20/21] ms/mm: memcontrol: teach uncharge_list to deal with kmem pages

Andrey Ryabinin <aryabinin@virtuozzo.com>
Thu Jan 12 01:47:37 PST 2017


From: Vladimir Davydov <vdavydov@virtuozzo.com>

On most architectures, page table pages are freed in batches via
release_pages().  If we want to charge them to kmemcg (as is done later
in this series), we need to teach mem_cgroup_uncharge_list() to handle
kmem pages.
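
To illustrate the accounting change, here is a standalone userspace
sketch.  Every type and helper in it is an illustrative stand-in, not
the kernel's actual struct page, PageKmemcg(), or page_counter API:
kmem pages bypass the anon/file LRU statistics and accumulate into a
separate nr_kmem count, which uncharge_batch() then subtracts from
both the memory and memsw counters, since kmem pages are charged to
both.

/*
 * NOTE: simplified userspace model, not kernel code.  struct fake_page,
 * its fields, and the helpers below are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_page {
        bool kmemcg;            /* stands in for PageKmemcg(page) */
        bool anon;              /* stands in for PageAnon(page) */
        unsigned int order;     /* stands in for compound_order(page) */
};

/* Stand-ins for the memcg->memory / memcg->memsw page counters. */
static unsigned long memory_counter = 1024;
static unsigned long memsw_counter  = 1024;

/*
 * Models the change to uncharge_batch(): kmem pages were charged to
 * both counters, so nr_kmem is subtracted from both of them.
 */
static void uncharge_batch_model(unsigned long nr_mem,
                                 unsigned long nr_memsw,
                                 unsigned long nr_kmem)
{
        memory_counter -= nr_mem + nr_kmem;
        memsw_counter  -= nr_memsw + nr_kmem;
}

/*
 * Models the loop in uncharge_list(): kmem pages skip the LRU
 * statistics and only feed the new nr_kmem count.
 */
static void uncharge_list_model(struct fake_page *pages, int n)
{
        unsigned long nr_anon = 0, nr_file = 0, nr_kmem = 0, pgpgout = 0;
        int i;

        for (i = 0; i < n; i++) {
                unsigned long nr_pages = 1UL << pages[i].order;

                if (!pages[i].kmemcg) {
                        if (pages[i].anon)
                                nr_anon += nr_pages;
                        else
                                nr_file += nr_pages;
                        pgpgout++;      /* only LRU pages count as pgpgout */
                } else {
                        nr_kmem += nr_pages;
                }
        }

        /* Assume every LRU page here was charged to memory and memsw. */
        uncharge_batch_model(nr_anon + nr_file, nr_anon + nr_file, nr_kmem);
        printf("anon=%lu file=%lu kmem=%lu pgpgout=%lu memory=%lu memsw=%lu\n",
               nr_anon, nr_file, nr_kmem, pgpgout,
               memory_counter, memsw_counter);
}

int main(void)
{
        struct fake_page pages[] = {
                { .anon = true },               /* ordinary anon page */
                { .kmemcg = true },             /* e.g. a page table page */
                { .kmemcg = true, .order = 1 }, /* compound kmem page */
        };

        uncharge_list_model(pages, 3);
        return 0;
}

The real uncharge_list() additionally tracks huge pages and consults
the per-page cgroup flags (PCG_MEM etc.) to decide which counters a
page was charged to; the sketch hardcodes the common case to keep the
batching logic visible.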

Link: http://lkml.kernel.org/r/18d5c09e97f80074ed25b97a7d0f32b95d875717.1464079538.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

https://jira.sw.ru/browse/PSBM-51558
(cherry picked from commit 5e8d35f849b1969b900695ae191326bfacf6bfc6)
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/memcontrol.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6c11788..0183a9c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6922,15 +6922,16 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 			   unsigned long nr_mem, unsigned long nr_memsw,
 			   unsigned long nr_anon, unsigned long nr_file,
-			   unsigned long nr_huge, struct page *dummy_page)
+			   unsigned long nr_huge, unsigned long nr_kmem,
+			   struct page *dummy_page)
 {
 	unsigned long flags;
 
 	if (!mem_cgroup_is_root(memcg)) {
 		if (nr_mem)
-			page_counter_uncharge(&memcg->memory, nr_mem);
+			page_counter_uncharge(&memcg->memory, nr_mem + nr_kmem);
 		if (nr_memsw)
-			page_counter_uncharge(&memcg->memsw, nr_memsw);
+			page_counter_uncharge(&memcg->memsw, nr_memsw + nr_kmem);
 
 		memcg_oom_recover(memcg);
 	}
@@ -6952,6 +6953,7 @@ static void uncharge_list(struct list_head *page_list)
 	unsigned long nr_anon = 0;
 	unsigned long nr_file = 0;
 	unsigned long nr_huge = 0;
+	unsigned long nr_kmem = 0;
 	unsigned long pgpgout = 0;
 	unsigned long nr_mem = 0;
 	struct list_head *next;
@@ -6981,23 +6983,26 @@ static void uncharge_list(struct list_head *page_list)
 		if (memcg != pc->mem_cgroup) {
 			if (memcg) {
 				uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-					       nr_anon, nr_file, nr_huge, page);
-				pgpgout = nr_mem = nr_memsw = 0;
+					nr_anon, nr_file, nr_huge, nr_kmem, page);
+				pgpgout = nr_mem = nr_memsw = nr_kmem = 0;
 				nr_anon = nr_file = nr_huge = 0;
 			}
 			memcg = pc->mem_cgroup;
 		}
 
-		if (PageTransHuge(page)) {
-			nr_pages <<= compound_order(page);
-			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-			nr_huge += nr_pages;
-		}
-
-		if (PageAnon(page))
-			nr_anon += nr_pages;
-		else
-			nr_file += nr_pages;
+		if (!PageKmemcg(page)) {
+			if (PageTransHuge(page)) {
+				nr_pages <<= compound_order(page);
+				VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+				nr_huge += nr_pages;
+			}
+			if (PageAnon(page))
+				nr_anon += nr_pages;
+			else
+				nr_file += nr_pages;
+			pgpgout++;
+		} else
+			nr_kmem += 1 << compound_order(page);
 
 		if (pc->flags & PCG_MEM)
 			nr_mem += nr_pages;
@@ -7010,7 +7015,7 @@ static void uncharge_list(struct list_head *page_list)
 
 	if (memcg)
 		uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-			       nr_anon, nr_file, nr_huge, page);
+			       nr_anon, nr_file, nr_huge, nr_kmem, page);
 }
 
 /**
-- 
2.10.2


