[Devel] [PATCH 1/5] mm: introduce page vz extension (using page_ext)

Alexander Mikhalitsyn alexander.mikhalitsyn at virtuozzo.com
Fri Jul 30 17:06:38 MSK 2021


This module allows managing per-page data.
We will use it in the memcg page cache limiting feature
to store an additional flag on the page.

We may also use this in the future in many other
circumstances (for debugging and so on).

See also mm/page_owner.c

https://jira.sw.ru/browse/PSBM-131957

Signed-off-by: Alexander Mikhalitsyn <alexander.mikhalitsyn at virtuozzo.com>
---
 include/linux/page_ext.h   | 36 ++++++++++++++++++++
 include/linux/page_vzext.h | 33 +++++++++++++++++++
 mm/Makefile                |  1 +
 mm/huge_memory.c           |  3 +-
 mm/migrate.c               |  3 +-
 mm/page_alloc.c            |  7 ++--
 mm/page_ext.c              |  2 ++
 mm/page_vzext.c            | 67 ++++++++++++++++++++++++++++++++++++++
 8 files changed, 147 insertions(+), 5 deletions(-)
 create mode 100644 include/linux/page_vzext.h
 create mode 100644 mm/page_vzext.c

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 1916cf2d49c8..70f3bdf51744 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/stacktrace.h>
 #include <linux/stackdepot.h>
+#include <linux/page_owner.h>
 
 struct pglist_data;
 struct page_ext_operations {
@@ -51,6 +52,28 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(struct page *page);
 
+extern void reset_page_vzext(struct page *page, unsigned int order);
+extern void split_page_vzext(struct page *page, unsigned int order);
+extern void copy_page_vzext(struct page *oldpage, struct page *newpage);
+
+static inline void reset_page_ext(struct page *page, unsigned int order)
+{
+	reset_page_owner(page, order);
+	reset_page_vzext(page, order);
+}
+
+static inline void split_page_ext(struct page *page, unsigned int order)
+{
+	split_page_owner(page, order);
+	split_page_vzext(page, order);
+}
+
+static inline void copy_page_ext(struct page *oldpage, struct page *newpage)
+{
+	copy_page_owner(oldpage, newpage);
+	copy_page_vzext(oldpage, newpage);
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
@@ -70,5 +93,18 @@ static inline void page_ext_init(void)
 static inline void page_ext_init_flatmem(void)
 {
 }
+
+static inline void reset_page_ext(struct page *page, unsigned int order)
+{
+}
+
+static inline void split_page_ext(struct page *page, unsigned int order)
+{
+}
+
+static inline void copy_page_ext(struct page *oldpage, struct page *newpage)
+{
+}
+
 #endif /* CONFIG_PAGE_EXTENSION */
 #endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_vzext.h b/include/linux/page_vzext.h
new file mode 100644
index 000000000000..54aeccb7cbdc
--- /dev/null
+++ b/include/linux/page_vzext.h
@@ -0,0 +1,33 @@
+/*
+ *  include/linux/page_vzext.h
+ *
+ *  Copyright (c) 2021 Virtuozzo International GmbH. All rights reserved.
+ *
+ */
+
+#ifndef __LINUX_PAGE_VZEXT_H
+#define __LINUX_PAGE_VZEXT_H
+
+#include <linux/page_ext.h>
+
+extern struct page_ext_operations page_vzext_ops;
+
+extern void reset_page_vzext(struct page *page, unsigned int order);
+extern void split_page_vzext(struct page *page, unsigned int order);
+extern void copy_page_vzext(struct page *oldpage, struct page *newpage);
+
+struct page_vzext {
+	unsigned long vzflags;
+};
+
+static inline struct page_vzext *get_page_vzext(struct page *page)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return NULL;
+
+	return (void *)page_ext + page_vzext_ops.offset;
+}
+
+#endif /* __LINUX_PAGE_VZEXT_H */
diff --git a/mm/Makefile b/mm/Makefile
index 3ab9c8d28816..41ecdcb4caa8 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
 obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
 obj-$(CONFIG_PAGE_OWNER) += page_owner.o
+obj-$(CONFIG_PAGE_EXTENSION) += page_vzext.o
 obj-$(CONFIG_CLEANCACHE) += cleancache.o
 obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
 obj-$(CONFIG_ZPOOL)	+= zpool.o
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 71acb7d1b253..cd8f5c07fe73 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -35,6 +35,7 @@
 #include <linux/oom.h>
 #include <linux/numa.h>
 #include <linux/page_owner.h>
+#include <linux/page_vzext.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2388,7 +2389,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	ClearPageCompound(head);
 
-	split_page_owner(head, HPAGE_PMD_ORDER);
+	split_page_ext(head, HPAGE_PMD_ORDER);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 532eb9f8e151..e89da1163510 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -46,6 +46,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
 #include <linux/page_owner.h>
+#include <linux/page_vzext.h>
 #include <linux/sched/mm.h>
 #include <linux/ptrace.h>
 #include <linux/oom.h>
@@ -659,7 +660,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
 
-	copy_page_owner(page, newpage);
+	copy_page_ext(page, newpage);
 
 	mem_cgroup_migrate(page, newpage);
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 52d70919c1b8..91024c513211 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,6 +64,7 @@
 #include <linux/sched/rt.h>
 #include <linux/sched/mm.h>
 #include <linux/page_owner.h>
+#include <linux/page_vzext.h>
 #include <linux/kthread.h>
 #include <linux/memcontrol.h>
 #include <linux/ftrace.h>
@@ -1207,7 +1208,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		 */
 		if (memcg_kmem_enabled() && PageKmemcg(page))
 			__memcg_kmem_uncharge_page(page, order);
-		reset_page_owner(page, order);
+		reset_page_ext(page, order);
 		return false;
 	}
 
@@ -1250,7 +1251,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 
 	page_cpupid_reset_last(page);
 	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
-	reset_page_owner(page, order);
+	reset_page_ext(page, order);
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),
@@ -3238,7 +3239,7 @@ void split_page(struct page *page, unsigned int order)
 
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-	split_page_owner(page, order);
+	split_page_ext(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
 
diff --git a/mm/page_ext.c b/mm/page_ext.c
index a1a43ab3da8a..ac1ccc75fb94 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -7,6 +7,7 @@
 #include <linux/vmalloc.h>
 #include <linux/kmemleak.h>
 #include <linux/page_owner.h>
+#include <linux/page_vzext.h>
 #include <linux/page_idle.h>
 
 /*
@@ -65,6 +66,7 @@ static struct page_ext_operations *page_ext_ops[] = {
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
 	&page_idle_ops,
 #endif
+	&page_vzext_ops,
 };
 
 static unsigned long total_usage;
diff --git a/mm/page_vzext.c b/mm/page_vzext.c
new file mode 100644
index 000000000000..52f4af2ef3f4
--- /dev/null
+++ b/mm/page_vzext.c
@@ -0,0 +1,67 @@
+/*
+ *  mm/page_vzext.c
+ *
+ *  Copyright (c) 2021 Virtuozzo International GmbH. All rights reserved.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/memblock.h>
+#include <linux/stacktrace.h>
+#include <linux/page_vzext.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+
+#include "internal.h"
+
+static bool need_page_vzext(void)
+{
+	return true;
+}
+
+struct page_ext_operations page_vzext_ops = {
+	.size = sizeof(struct page_vzext),
+	.need = need_page_vzext,
+};
+
+static inline struct page_vzext *get_page_ext_vzext(struct page_ext *page_ext)
+{
+	return (void *)page_ext + page_vzext_ops.offset;
+}
+
+void reset_page_vzext(struct page *page, unsigned int order)
+{
+	/* TODO: write universal code for page deinitialization */
+}
+
+void split_page_vzext(struct page *page, unsigned int order)
+{
+	int i;
+	struct page_ext *page_ext = lookup_page_ext(page);
+	struct page_vzext *page_vzext;
+
+	if (unlikely(!page_ext))
+		return;
+
+	page_vzext = get_page_ext_vzext(page_ext);
+	for (i = 1; i < (1 << order); i++)
+		copy_page_vzext(page, page + i);
+}
+
+void copy_page_vzext(struct page *oldpage, struct page *newpage)
+{
+	struct page_ext *old_ext = lookup_page_ext(oldpage);
+	struct page_ext *new_ext = lookup_page_ext(newpage);
+	struct page_vzext *old_page_vzext, *new_page_vzext;
+
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
+	old_page_vzext = get_page_ext_vzext(old_ext);
+	new_page_vzext = get_page_ext_vzext(new_ext);
+
+	/* TODO: add callbacks to handle separate vzext in different helpers */
+	new_page_vzext->vzflags = old_page_vzext->vzflags;
+}
-- 
2.28.0



More information about the Devel mailing list