[Devel] [PATCH RHEL7 COMMIT] mm/page_alloc.c: check if page cgroup still in use during alloc/free.

Konstantin Khorenko khorenko at virtuozzo.com
Thu Aug 8 18:29:26 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.21.3.vz7.106.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.21.3.vz7.106.10
------>
commit add89a682a2f2dc567fb8d4002317cf010c355ed
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date:   Wed Aug 7 15:47:06 2019 +0300

    mm/page_alloc.c: check if page cgroup still in use during alloc/free.
    
    Add a debug check to the free_page()/alloc_page() paths to make sure
    that we don't allocate/free pages whose page_cgroup is still marked as used.
    
    This debug patch is to be reverted before the release.
    
    https://jira.sw.ru/browse/PSBM-96036
    Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
 mm/page_alloc.c  | 22 ++++++++++++++++++++++
 mm/page_cgroup.c |  3 ++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 78c68ba512e9..c4bbf9c75a7e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -975,6 +975,25 @@ void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
 	}
 }
 
+static void check_memcg(struct page *page)
+{
+	struct page_cgroup *pc;
+	extern int page_cgroup_inited;
+
+	if (!page_cgroup_inited)
+		return;
+
+	pc = lookup_page_cgroup(page);
+	/*
+	 * Fast unlocked return. Theoretically might have changed, have to
+	 * check again after locking.
+	 */
+	if (!pc || !PageCgroupUsed(pc))
+		return;
+
+	BUG_ON(1);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	int i;
@@ -987,6 +1006,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	if (PageAnon(page))
 		page->mapping = NULL;
 	memcg_kmem_uncharge_pages(page, order);
+	check_memcg(page);
+
 	for (i = 0; i < (1 << order); i++) {
 		bad += free_pages_check(page + i);
 		if (static_key_false(&zero_free_pages))
@@ -1580,6 +1601,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 	arch_alloc_page(page, order);
 	kernel_map_pages(page, 1 << order, 1);
 	kasan_alloc_pages(page, order);
+	check_memcg(page);
 
 	if (gfp_flags & __GFP_ZERO)
 		prep_zero_page(page, order, gfp_flags);
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 452f59a4fe90..a856d993f274 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -266,7 +266,7 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
 }
 
 #endif
-
+int page_cgroup_inited;
 void __init page_cgroup_init(void)
 {
 	unsigned long pfn;
@@ -309,6 +309,7 @@ void __init page_cgroup_init(void)
 	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
 			 "don't want memory cgroups\n");
 	invoke_page_ext_init_callbacks();
+	page_cgroup_inited = true;
 	return;
 oom:
 	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");



More information about the Devel mailing list