[CRIU] [PATCH 2/2] mem: Don't assume guard page is returned in procfs with new kernels

Cyrill Gorcunov gorcunov at openvz.org
Wed Jun 21 00:42:27 MSK 2017


If the guard page is not reported in show_map_vma, we should
neither adjust the vma start address nor call unmap_guard_pages
in the restorer.

https://github.com/xemul/criu/issues/322

Signed-off-by: Cyrill Gorcunov <gorcunov at openvz.org>
---
 criu/include/mem.h |  2 ++
 criu/mem.c         | 19 ++++++++++++++-----
 criu/proc_parse.c  |  6 ++++--
 3 files changed, 20 insertions(+), 7 deletions(-)
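
Note (commentary, not part of the patch): kdat.stack_guard_gap_hidden is
set up by the previous patch in this series and is not shown here. For
reference, a minimal stand-alone probe for the same kernel behaviour could
look roughly like the sketch below; the function name and layout are
illustrative only, not the actual kerndat code.

	/*
	 * Illustrative only: not the kerndat probe from patch 1/2.
	 * Check whether the running kernel hides the stack guard page
	 * of a MAP_GROWSDOWN vma in /proc/self/maps (old behaviour) or
	 * reports the real vm_start (kernels with the reworked stack
	 * guard gap).
	 */
	#include <stdio.h>
	#include <stdbool.h>
	#include <unistd.h>
	#include <sys/mman.h>

	static bool guard_gap_hidden_by_procfs(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		size_t len = 2 * page;
		unsigned long start, end;
		char line[512];
		bool hidden = false;
		void *m;
		FILE *f;

		m = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
		if (m == MAP_FAILED)
			return false;

		f = fopen("/proc/self/maps", "r");
		if (!f)
			goto out;

		while (fgets(line, sizeof(line), f)) {
			if (sscanf(line, "%lx-%lx", &start, &end) != 2)
				continue;
			if (end != (unsigned long)m + len)
				continue;
			/*
			 * Old kernels subtract the guard page in
			 * show_map_vma, so the reported start is
			 * vm_start + PAGE_SIZE.
			 */
			hidden = (start == (unsigned long)m + page);
			break;
		}
		fclose(f);
	out:
		munmap(m, len);
		return hidden;
	}

If the kernel still hides the guard page, the probe returns true and the
old PAGE_SIZE adjustments stay in effect; on newer kernels it returns
false and the vma addresses read from procfs are used as is.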

diff --git a/criu/include/mem.h b/criu/include/mem.h
index 6791bfd0a647..bb897c59983e 100644
--- a/criu/include/mem.h
+++ b/criu/include/mem.h
@@ -9,12 +9,14 @@ struct parasite_ctl;
 struct vm_area_list;
 struct page_pipe;
 struct pstree_item;
+struct vma_area;
 
 struct mem_dump_ctl {
 	bool	pre_dump;
 	bool	lazy;
 };
 
+extern bool vma_has_guard_gap_hidden(struct vma_area *vma);
 extern bool page_is_zero(u64 pme);
 extern bool page_in_parent(bool dirty);
 extern int prepare_mm_pid(struct pstree_item *i);
diff --git a/criu/mem.c b/criu/mem.c
index 3c7186ade2c0..390fc0a50d5e 100644
--- a/criu/mem.c
+++ b/criu/mem.c
@@ -530,7 +530,7 @@ int prepare_mm_pid(struct pstree_item *i)
 
 		if (vma_area_is_private(vma, kdat.task_size)) {
 			ri->vmas.priv_size += vma_area_len(vma);
-			if (vma->e->flags & MAP_GROWSDOWN)
+			if (vma_has_guard_gap_hidden(vma))
 				ri->vmas.priv_size += PAGE_SIZE;
 		}
 
@@ -665,7 +665,7 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 	 * A grow-down VMA has a guard page, which protect a VMA below it.
 	 * So one more page is mapped here to restore content of the first page
 	 */
-	if (vma->e->flags & MAP_GROWSDOWN)
+	if (vma_has_guard_gap_hidden(vma))
 		vma->e->start -= PAGE_SIZE;
 
 	size = vma_entry_len(vma->e);
@@ -717,7 +717,7 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 		 */
 
 		paddr = decode_pointer(vma->pvma->premmaped_addr);
-		if (vma->e->flags & MAP_GROWSDOWN)
+		if (vma_has_guard_gap_hidden(vma))
 			paddr -= PAGE_SIZE;
 
 		addr = mremap(paddr, size, size,
@@ -733,7 +733,7 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 	pr_debug("\tpremap %#016"PRIx64"-%#016"PRIx64" -> %016lx\n",
 		vma->e->start, vma->e->end, (unsigned long)addr);
 
-	if (vma->e->flags & MAP_GROWSDOWN) { /* Skip gurad page */
+	if (vma_has_guard_gap_hidden(vma)) { /* Skip guard page */
 		vma->e->start += PAGE_SIZE;
 		vma->premmaped_addr += PAGE_SIZE;
 	}
@@ -748,7 +748,8 @@ static int premap_private_vma(struct pstree_item *t, struct vma_area *vma, void
 static inline bool vma_force_premap(struct vma_area *vma, struct list_head *head)
 {
 	/*
-	 * Growsdown VMAs always have one guard page at the
+	 * On kernels with 4K guard pages, growsdown VMAs
+	 * always have one guard page at the
 	 * beginning and sometimes this page contains data.
 	 * In case the VMA is premmaped, we premmap one page
 	 * larger VMA. In case of in place restore we can only
@@ -1095,6 +1096,11 @@ int prepare_mappings(struct pstree_item *t)
 	return ret;
 }
 
+bool vma_has_guard_gap_hidden(struct vma_area *vma)
+{
+	return kdat.stack_guard_gap_hidden && (vma->e->flags & MAP_GROWSDOWN);
+}
+
 /*
  * A gard page must be unmapped after restoring content and
  * forking children to restore COW memory.
@@ -1104,6 +1110,9 @@ int unmap_guard_pages(struct pstree_item *t)
 	struct vma_area *vma;
 	struct list_head *vmas = &rsti(t)->vmas.h;
 
+	if (!kdat.stack_guard_gap_hidden)
+		return 0;
+
 	list_for_each_entry(vma, vmas, list) {
 		if (!vma_area_is(vma, VMA_PREMMAPED))
 			continue;
diff --git a/criu/proc_parse.c b/criu/proc_parse.c
index 041d4512413d..6ca9984058aa 100644
--- a/criu/proc_parse.c
+++ b/criu/proc_parse.c
@@ -25,6 +25,7 @@
 #include "kerndat.h"
 #include "vdso.h"
 #include "vma.h"
+#include "mem.h"
 #include "bfd.h"
 #include "proc_parse.h"
 #include "fdinfo.h"
@@ -637,9 +638,10 @@ static int vma_list_add(struct vma_area *vma_area,
 	}
 
 	/* Add a guard page only if here is enough space for it */
-	if ((vma_area->e->flags & MAP_GROWSDOWN) &&
+	if (vma_has_guard_gap_hidden(vma_area) &&
 	    *prev_end < vma_area->e->start)
-		vma_area->e->start -= PAGE_SIZE; /* Guard page */
+		if (kdat.stack_guard_gap_hidden)
+			vma_area->e->start -= PAGE_SIZE; /* Guard page */
 	*prev_end = vma_area->e->end;
 
 	list_add_tail(&vma_area->list, &vma_area_list->h);
-- 
2.7.5