[Devel] [PATCH rh7] x86: "pages zeroing on free" option introduced

Stanislav Kinsburskiy skinsbursky at virtuozzo.com
Wed Nov 18 07:12:10 PST 2015


This patch adds a page-zeroing option to free_pages_prepare().
It is disabled by default; to enable it, add the "zero-free-pages" option
to the kernel boot parameters.
Note that handling of the boot option and the actual enabling of page
zeroing are split. The reason is that static key initialization cannot be
done while kernel parameters are being parsed (the exact cause is unclear,
but the kernel simply does not boot if the static key is incremented from
the parameter callback). Because of this split, zeroing is enabled later
during boot, but still early enough.
Another thing to note is that the code lives in a separate source file,
which should simplify future rebases.
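
For reference, the two-stage pattern is condensed in the sketch below. It
mirrors the mm/zero_page.c hunk further down; the flag name is purely
illustrative, and zero_free_pages is the key declared in mm/internal.h:

    #include <linux/init.h>
    #include <linux/jump_label.h>

    extern struct static_key zero_free_pages;  /* declared in mm/internal.h */

    /* Stage 1: the __setup() callback only records the request in a plain
     * flag; incrementing the static key this early prevents the kernel
     * from booting. */
    static int zero_requested;

    static int __init enable_zero_free_pages(char *__unused)
    {
            zero_requested = 1;
            return 1;       /* option handled */
    }
    __setup("zero-free-pages", enable_zero_free_pages);

    /* Stage 2: an early_initcall() runs later in boot and actually
     * enables the key tested in free_pages_prepare(). */
    static int __init setup_zero_free_pages(void)
    {
            if (zero_requested)
                    static_key_slow_inc(&zero_free_pages);
            return 0;
    }
    early_initcall(setup_zero_free_pages);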

https://jira.sw.ru/browse/PSBM-33071

Signed-off-by: Stanislav Kinsburskiy <skinsbursky at virtuozzo.com>
---
 mm/Makefile     |    2 +-
 mm/internal.h   |    2 ++
 mm/page_alloc.c |    5 ++++-
 mm/zero_page.c  |   19 +++++++++++++++++++
 4 files changed, 26 insertions(+), 2 deletions(-)
 create mode 100644 mm/zero_page.c

diff --git a/mm/Makefile b/mm/Makefile
index e499a0d..76f3593 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,7 +21,7 @@ obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o \
 			   interval_tree.o list_lru.o workingset.o oom_group.o \
-			   iov-iter.o $(mmu-y)
+			   iov-iter.o zero_page.o $(mmu-y)
 
 obj-y += init-mm.o
 
diff --git a/mm/internal.h b/mm/internal.h
index 2524d31..3ed90e0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -379,4 +379,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
 #define ALLOC_FAIR		0x100 /* fair zone allocation */
 
+extern struct static_key zero_free_pages;
+
 #endif	/* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f70c5f4..12126f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -730,8 +730,11 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 
 	if (PageAnon(page))
 		page->mapping = NULL;
-	for (i = 0; i < (1 << order); i++)
+	for (i = 0; i < (1 << order); i++) {
 		bad += free_pages_check(page + i);
+		if (static_key_false(&zero_free_pages))
+			clear_highpage(page + i);
+	}
 	if (bad)
 		return false;
 
diff --git a/mm/zero_page.c b/mm/zero_page.c
new file mode 100644
index 0000000..51cce4f
--- /dev/null
+++ b/mm/zero_page.c
@@ -0,0 +1,19 @@
+#include <linux/jump_label.h>
+
+static int zero_data_pages_enabled;
+struct static_key zero_free_pages = STATIC_KEY_INIT_FALSE;
+
+static int __init enable_zero_free_pages(char *__unused)
+{
+	zero_data_pages_enabled = 1;
+	return 1;
+}
+__setup("zero-free-pages", enable_zero_free_pages);
+
+static int __init setup_zero_free_pages(void)
+{
+	if (zero_data_pages_enabled)
+		static_key_slow_inc(&zero_free_pages);
+	return 0;
+}
+early_initcall(setup_zero_free_pages);
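
Usage note (not part of the patch): page zeroing stays off unless
"zero-free-pages" is present on the kernel command line. On a GRUB-based
setup that would typically mean appending it to the boot loader
configuration and regenerating the GRUB config before rebooting; the file
path and variable below are assumptions about the boot loader, not
something this patch defines:

    # /etc/default/grub -- append to the existing options
    GRUB_CMDLINE_LINUX="... zero-free-pages"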


