[Devel] [PATCH rh7 20/21] ms/mm: memcontrol: teach uncharge_list to deal with kmem pages

Andrey Ryabinin <aryabinin@virtuozzo.com>
Tue Nov 1 02:21:39 PDT 2016


From: Vladimir Davydov <vdavydov@virtuozzo.com>

Page table pages are freed in batches by release_pages() on most
architectures.  If we want to charge them to kmemcg (which is done
later in this series), we need to teach mem_cgroup_uncharge_list() to
handle kmem pages.
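
To make the batching concrete, here is a minimal userspace model of
the flow this patch sets up.  Everything in it (names, types, the
two-counter layout) is an illustrative invention, not the kernel's
actual code; locking, LRU statistics and the PGPGOUT event are
omitted.  It shows the two ideas of the patch: kmem pages are tallied
separately, since they carry no anon/file/huge statistics and no
PGPGOUT event, and one counter update per same-memcg run covers
regular and kmem pages alike.

#include <stdio.h>

struct memcg { const char *name; long charged; };
struct page { struct memcg *memcg; int kmem; int order; };

/* one counter update per run, covering regular and kmem pages alike */
static void uncharge_batch(struct memcg *memcg, long nr_mem, long nr_kmem)
{
	memcg->charged -= nr_mem + nr_kmem;
	printf("%s: uncharged %ld mem + %ld kmem pages\n",
	       memcg->name, nr_mem, nr_kmem);
}

static void uncharge_list(struct page *pages, int count)
{
	struct memcg *memcg = NULL;
	long nr_mem = 0, nr_kmem = 0;

	for (int i = 0; i < count; i++) {
		struct page *page = &pages[i];

		/* flush the batch whenever the owning memcg changes */
		if (memcg && memcg != page->memcg) {
			uncharge_batch(memcg, nr_mem, nr_kmem);
			nr_mem = nr_kmem = 0;
		}
		memcg = page->memcg;

		if (page->kmem)		/* kmem: no LRU stats, no PGPGOUT */
			nr_kmem += 1L << page->order;
		else
			nr_mem += 1L << page->order;
	}
	if (memcg)
		uncharge_batch(memcg, nr_mem, nr_kmem);
}

int main(void)
{
	struct memcg a = { "cg-a", 8 }, b = { "cg-b", 4 };
	struct page list[] = {
		{ &a, 0, 0 },	/* one regular page */
		{ &a, 1, 2 },	/* an order-2 kmem allocation */
		{ &b, 0, 2 },	/* an order-2 regular page */
	};

	uncharge_list(list, 3);
	printf("left charged: cg-a=%ld cg-b=%ld\n", a.charged, b.charged);
	return 0;
}

In the real patch the same fold happens in uncharge_batch(), which
adds nr_kmem to both the res and memsw res_counter uncharges.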

Link: http://lkml.kernel.org/r/18d5c09e97f80074ed25b97a7d0f32b95d875717.1464079538.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

https://jira.sw.ru/browse/PSBM-51558
(cherry picked from commit 5e8d35f849b1969b900695ae191326bfacf6bfc6)
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
---
 mm/memcontrol.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cb7657e..880fa5b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6967,17 +6967,18 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 			   unsigned long nr_mem, unsigned long nr_memsw,
 			   unsigned long nr_anon, unsigned long nr_file,
-			   unsigned long nr_huge, struct page *dummy_page)
+			   unsigned long nr_huge, unsigned long nr_kmem,
+			   struct page *dummy_page)
 {
 	unsigned long flags;
 
 	if (!mem_cgroup_is_root(memcg)) {
 		if (nr_mem)
 			res_counter_uncharge(&memcg->res,
-					     nr_mem * PAGE_SIZE);
+					     (nr_mem + nr_kmem) * PAGE_SIZE);
 		if (nr_memsw)
 			res_counter_uncharge(&memcg->memsw,
-					     nr_memsw * PAGE_SIZE);
+					     (nr_memsw + nr_kmem) * PAGE_SIZE);
 		memcg_oom_recover(memcg);
 	}
 
@@ -6998,6 +6999,7 @@ static void uncharge_list(struct list_head *page_list)
 	unsigned long nr_anon = 0;
 	unsigned long nr_file = 0;
 	unsigned long nr_huge = 0;
+	unsigned long nr_kmem = 0;
 	unsigned long pgpgout = 0;
 	unsigned long nr_mem = 0;
 	struct list_head *next;
@@ -7027,23 +7029,26 @@ static void uncharge_list(struct list_head *page_list)
 		if (memcg != pc->mem_cgroup) {
 			if (memcg) {
 				uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-					       nr_anon, nr_file, nr_huge, page);
-				pgpgout = nr_mem = nr_memsw = 0;
+					nr_anon, nr_file, nr_huge, nr_kmem, page);
+				pgpgout = nr_mem = nr_memsw = nr_kmem = 0;
 				nr_anon = nr_file = nr_huge = 0;
 			}
 			memcg = pc->mem_cgroup;
 		}
 
-		if (PageTransHuge(page)) {
-			nr_pages <<= compound_order(page);
-			VM_BUG_ON_PAGE(!PageTransHuge(page), page);
-			nr_huge += nr_pages;
-		}
-
-		if (PageAnon(page))
-			nr_anon += nr_pages;
-		else
-			nr_file += nr_pages;
+		if (!PageKmemcg(page)) {
+			if (PageTransHuge(page)) {
+				nr_pages <<= compound_order(page);
+				VM_BUG_ON_PAGE(!PageTransHuge(page), page);
+				nr_huge += nr_pages;
+			}
+			if (PageAnon(page))
+				nr_anon += nr_pages;
+			else
+				nr_file += nr_pages;
+			pgpgout++;
+		} else
+			nr_kmem += 1 << compound_order(page);
 
 		if (pc->flags & PCG_MEM)
 			nr_mem += nr_pages;
@@ -7056,7 +7061,7 @@ static void uncharge_list(struct list_head *page_list)
 
 	if (memcg)
 		uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw,
-			       nr_anon, nr_file, nr_huge, page);
+			       nr_anon, nr_file, nr_huge, nr_kmem, page);
 }
 
 /**
-- 
2.7.3