[Devel] [PATCH RH8 1/5] mm: introduce page vz extension (using page_ext)
Alexander Mikhalitsyn
alexander.mikhalitsyn at virtuozzo.com
Thu Jul 29 18:20:45 MSK 2021
This module allows managing per-page data.
We will use it in the memcg page cache limiting feature
to store an additional flag on each page.
We may also use it for other purposes in the future
(debugging, accounting, and so on).
See also mm/page_owner.c
https://jira.sw.ru/browse/PSBM-131957
Signed-off-by: Alexander Mikhalitsyn <alexander.mikhalitsyn at virtuozzo.com>
---
include/linux/page_vzext.h | 25 +++++++++++++++
mm/Makefile | 1 +
mm/huge_memory.c | 2 ++
mm/migrate.c | 2 ++
mm/page_alloc.c | 4 +++
mm/page_ext.c | 2 ++
mm/page_vzext.c | 66 ++++++++++++++++++++++++++++++++++++++
7 files changed, 102 insertions(+)
create mode 100644 include/linux/page_vzext.h
create mode 100644 mm/page_vzext.c
diff --git a/include/linux/page_vzext.h b/include/linux/page_vzext.h
new file mode 100644
index 000000000000..f59c17babdcb
--- /dev/null
+++ b/include/linux/page_vzext.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_PAGE_VZEXT_H
+#define __LINUX_PAGE_VZEXT_H
+
+extern struct page_ext_operations page_vzext_ops;
+
+extern void reset_page_vzext(struct page *page, unsigned int order);
+extern void split_page_vzext(struct page *page, unsigned int order);
+extern void copy_page_vzext(struct page *oldpage, struct page *newpage);
+
+struct page_vzext {
+ unsigned long vzflags;
+};
+
+/*
+ * get_page_vzext - return the vz extension area attached to @page.
+ *
+ * Returns NULL when the page has no page_ext (e.g. page_ext allocation
+ * failed or is not initialized yet); callers must check for NULL.
+ * The returned storage lives at page_vzext_ops.offset inside page_ext.
+ */
+static inline struct page_vzext *get_page_vzext(struct page *page)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return NULL;
+
+	return (void *)page_ext + page_vzext_ops.offset;
+}
+
+#endif /* __LINUX_PAGE_VZEXT_H */
diff --git a/mm/Makefile b/mm/Makefile
index 3ab9c8d28816..41ecdcb4caa8 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
obj-$(CONFIG_PAGE_OWNER) += page_owner.o
+obj-$(CONFIG_PAGE_EXTENSION) += page_vzext.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZPOOL) += zpool.o
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 71acb7d1b253..ca8f0bd2ce3f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -35,6 +35,7 @@
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
+#include <linux/page_vzext.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -2389,6 +2390,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageCompound(head);
split_page_owner(head, HPAGE_PMD_ORDER);
+ split_page_vzext(head, HPAGE_PMD_ORDER);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 532eb9f8e151..88d9cce19071 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -46,6 +46,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
+#include <linux/page_vzext.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
@@ -660,6 +661,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
end_page_writeback(newpage);
copy_page_owner(page, newpage);
+ copy_page_vzext(page, newpage);
mem_cgroup_migrate(page, newpage);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 52d70919c1b8..e227e1b41445 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -64,6 +64,7 @@
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
+#include <linux/page_vzext.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
@@ -1208,6 +1209,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (memcg_kmem_enabled() && PageKmemcg(page))
__memcg_kmem_uncharge_page(page, order);
reset_page_owner(page, order);
+ reset_page_vzext(page, order);
return false;
}
@@ -1251,6 +1253,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
page_cpupid_reset_last(page);
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
reset_page_owner(page, order);
+ reset_page_vzext(page, order);
if (!PageHighMem(page)) {
debug_check_no_locks_freed(page_address(page),
@@ -3239,6 +3242,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order);
+ split_page_vzext(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
diff --git a/mm/page_ext.c b/mm/page_ext.c
index a1a43ab3da8a..ac1ccc75fb94 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
+#include <linux/page_vzext.h>
#include <linux/page_idle.h>
/*
@@ -65,6 +66,7 @@ static struct page_ext_operations *page_ext_ops[] = {
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
&page_idle_ops,
#endif
+ &page_vzext_ops,
};
static unsigned long total_usage;
diff --git a/mm/page_vzext.c b/mm/page_vzext.c
new file mode 100644
index 000000000000..90cef8c18e80
--- /dev/null
+++ b/mm/page_vzext.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/memblock.h>
+#include <linux/stacktrace.h>
+#include <linux/page_vzext.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+
+#include "internal.h"
+
+/*
+ * Always request our per-page storage: the vz extension is unconditional
+ * whenever CONFIG_PAGE_EXTENSION is enabled (no boot parameter gates it).
+ */
+static bool need_page_vzext(void)
+{
+	return true;
+}
+
+/*
+ * Registered in mm/page_ext.c:page_ext_ops[]; page_ext core fills in
+ * .offset with the location of our struct page_vzext inside page_ext.
+ */
+struct page_ext_operations page_vzext_ops = {
+	.size = sizeof(struct page_vzext),
+	.need = need_page_vzext,
+};
+
+/*
+ * Translate an already-looked-up page_ext into our vz extension area.
+ * Caller guarantees @page_ext is non-NULL.
+ */
+static inline struct page_vzext *get_page_ext_vzext(struct page_ext *page_ext)
+{
+	return (void *)page_ext + page_vzext_ops.offset;
+}
+
+/*
+ * reset_page_vzext - clear the vz extension for every page of a block
+ * being freed (called from free_pages_prepare()).
+ *
+ * Without clearing vzflags here, a stale flag would leak into the next
+ * owner of the page. NOTE(review): the original hunk looked up page_ext
+ * in the loop but never wrote anything back, making the function a no-op;
+ * the vzflags reset below is the missing store.
+ */
+void reset_page_vzext(struct page *page, unsigned int order)
+{
+	int i;
+	struct page_ext *page_ext;
+	struct page_vzext *page_vzext;
+
+	for (i = 0; i < (1 << order); i++) {
+		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
+
+		page_vzext = get_page_ext_vzext(page_ext);
+		page_vzext->vzflags = 0;
+	}
+}
+
+/*
+ * split_page_vzext - propagate the head page's vz extension to every
+ * tail page when a high-order page is split (split_page(),
+ * __split_huge_page()).
+ *
+ * The head-page lookup is only an early-out: if the head has no
+ * page_ext there is nothing to copy. copy_page_vzext() re-does the
+ * per-page lookups itself. The original hunk also fetched the head's
+ * page_vzext into an unused local, which is dropped here.
+ */
+void split_page_vzext(struct page *page, unsigned int order)
+{
+	int i;
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	for (i = 1; i < (1 << order); i++)
+		copy_page_vzext(page, page + i);
+}
+
+/*
+ * copy_page_vzext - duplicate @oldpage's vz flags onto @newpage.
+ *
+ * Used on page migration (migrate_page_states()) and when splitting a
+ * high-order page, so the extension travels with the page contents.
+ * Silently does nothing if either page lacks page_ext — consistent with
+ * how copy_page_owner() degrades when page_ext is unavailable.
+ */
+void copy_page_vzext(struct page *oldpage, struct page *newpage)
+{
+	struct page_ext *old_ext = lookup_page_ext(oldpage);
+	struct page_ext *new_ext = lookup_page_ext(newpage);
+	struct page_vzext *old_page_vzext, *new_page_vzext;
+
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
+	old_page_vzext = get_page_ext_vzext(old_ext);
+	new_page_vzext = get_page_ext_vzext(new_ext);
+	new_page_vzext->vzflags = old_page_vzext->vzflags;
+}
--
2.28.0
More information about the Devel
mailing list