<div dir="ltr"><br><div class="gmail_extra"><br><br><div class="gmail_quote">2013/8/28 Pavel Emelyanov <span dir="ltr"><<a href="mailto:xemul@parallels.com" target="_blank">xemul@parallels.com</a>></span><br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">
<div class="HOEnZb"><div class="h5">On 08/26/2013 05:15 PM, Andrey Vagin wrote:<br>
>> In /proc/pid/maps grow-down VMAs are shown without guard pages, but
>> sometimes these "guard" pages can contain useful data, for example when
>> a real guard page has been remapped by another VMA. Let's call such
>> pages fake guard pages.
>>
>> So when a grow-down VMA is mmapped on restore, it should be mapped with
>> one more guard page to restore the content of the fake guard page.
>>
>> https://bugzilla.openvz.org/show_bug.cgi?id=2715
>> Signed-off-by: Andrey Vagin <avagin@openvz.org>
>> ---
>>  cr-restore.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++++++-------
>>  1 file changed, 53 insertions(+), 7 deletions(-)
>>
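
Side note: the behaviour the changelog above describes is easy to see with a toy
program like the one below. It is only an illustration, not part of the patch;
the two-page size and the string are arbitrary, and it assumes a kernel from
this era, where the lowest page of a MAP_GROWSDOWN area is hidden from
/proc/pid/maps even when it holds data.

#define _GNU_SOURCE
/* Illustration only: the lowest page of a MAP_GROWSDOWN mapping is
 * writable and can hold data, but /proc/self/maps reports the area
 * starting one page higher, so a dump based on maps would miss it. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *m = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (m == MAP_FAILED || !f)
		return 1;

	strcpy(m, "data in the hidden page");	/* first page of the VMA */
	printf("mmap() returned %p\n", (void *)m);
	printf("/proc/self/maps shows:\n");
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);		/* compare the start address */

	fclose(f);
	return 0;
}

On such a kernel the maps entry for this area starts at m + PAGE_SIZE, which is
the page the patch re-adds below vma.start on restore.
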
>> diff --git a/cr-restore.c b/cr-restore.c
>> index 14bf300..8fea9ee 100644
>> --- a/cr-restore.c
>> +++ b/cr-restore.c
>> @@ -207,7 +207,7 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
>>  {
>>  	int ret;
>>  	void *addr, *paddr = NULL;
>> -	unsigned long nr_pages;
>> +	unsigned long nr_pages, size;
>>  	struct vma_area *p = *pvma;
>>
>>  	if (vma_entry_is(&vma->vma, VMA_FILE_PRIVATE)) {
>> @@ -242,6 +242,17 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
>>
>>  	*pvma = p;
>>
>> +	/*
>> +	 * A grow-down VMA has a guard page, which protects the VMA below it,
>> +	 * so one more page is mapped here to restore the content of the first page.
>> +	 */
>> +	if (vma->vma.flags & MAP_GROWSDOWN) {
>> +		vma->vma.start -= PAGE_SIZE;
>> +		if (paddr)
>> +			paddr -= PAGE_SIZE;
>> +	}
>> +
>> +	size = vma_entry_len(&vma->vma);
>>  	if (paddr == NULL) {
>>  		/*
>>  		 * The respective memory area was NOT found in the parent.
>> @@ -250,7 +261,7 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
>>  		pr_info("Map 0x%016"PRIx64"-0x%016"PRIx64" 0x%016"PRIx64" vma\n",
>>  			vma->vma.start, vma->vma.end, vma->vma.pgoff);
>>
>> -		addr = mmap(tgt_addr, vma_entry_len(&vma->vma),
>> +		addr = mmap(tgt_addr, size,
>>  				vma->vma.prot | PROT_WRITE,
>>  				vma->vma.flags | MAP_FIXED,
>>  				vma->vma.fd, vma->vma.pgoff);
>> @@ -266,7 +277,7 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
>>  		 */
>>  		vma->ppage_bitmap = p->page_bitmap;
>>
>> -		addr = mremap(paddr, vma_area_len(vma), vma_area_len(vma),
>> +		addr = mremap(paddr, size, size,
>>  				MREMAP_FIXED | MREMAP_MAYMOVE, tgt_addr);
>>  		if (addr != tgt_addr) {
>>  			pr_perror("Unable to remap a private vma");
>> @@ -279,10 +290,15 @@ static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
>>  	pr_debug("\tpremap 0x%016"PRIx64"-0x%016"PRIx64" -> %016lx\n",
>>  		vma->vma.start, vma->vma.end, (unsigned long)addr);
>>
>> +	if (vma->vma.flags & MAP_GROWSDOWN) { /* Skip guard page */
>> +		vma->vma.start += PAGE_SIZE;
>> +		vma_premmaped_start(&vma->vma) += PAGE_SIZE;
>> +	}
>> +
>>  	if (vma_entry_is(&vma->vma, VMA_FILE_PRIVATE))
>>  		close(vma->vma.fd);
>>
>> -	return 0;
>> +	return size;
>>  }
>>
>>  static int restore_priv_vma_content(pid_t pid)
>> @@ -475,8 +491,11 @@ static int prepare_mappings(int pid)
>>  			break;
>>  		}
>>
>> -		if (vma_priv(&vma->vma))
>> +		if (vma_priv(&vma->vma)) {
>>  			rst_vmas.priv_size += vma_area_len(vma);
>> +			if (vma->vma.flags & MAP_GROWSDOWN)
>> +				rst_vmas.priv_size += PAGE_SIZE;
>> +		}
>>  	}
>>  	close(fd);
>>
>> @@ -512,10 +531,10 @@ static int prepare_mappings(int pid)
>>  		if (ret < 0)
>>  			break;
>>
>> -		addr += vma_area_len(vma);
>> +		addr += ret;
>>  	}
>>
>> -	if (ret == 0)
>> +	if (ret >= 0)
>>  		ret = restore_priv_vma_content(pid);
>>
>>  out:
>> @@ -536,6 +555,31 @@ out:
>>  	return ret;
>>  }
>>
>> +/*
>> + * A guard page must be unmapped after restoring content and
>> + * forking children to restore COW memory.
>> + */
>> +int unmap_guard_pages()
>> +{
>> +	struct vma_area *vma;
>> +
>> +	list_for_each_entry(vma, &rst_vmas.h, list) {
>
> Let's unmap them "in place", instead of one more vmas list scan.

We can't do that, because the children have not been forked yet at that point.
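
To spell out the ordering I mean, here is a simplified sketch, not the actual
call sequence in cr-restore.c; restore_outline() and its error handling are
made up, only the three callees are the real functions used in the patch:

/* Simplified outline, only to show why unmap_guard_pages() has to
 * run after the children are forked. */
static int restore_outline(int pid)
{
	/* premap private VMAs; grow-down ones get one extra low page and
	 * their content, including the fake guard page, is restored */
	if (prepare_mappings(pid) < 0)
		return -1;

	/* the children fork here and inherit the premapped areas, so pages
	 * shared at dump time (including a fake guard page) stay COW-shared
	 * with the parent */
	if (create_children_and_session())
		return -1;

	/* only now can the extra pages go away; dropping them before the
	 * fork would leave nothing for the children to inherit */
	if (unmap_guard_pages())
		return -1;

	return 0;
}

So doing the munmap() inside the premap loop would throw the page away before
the children had a chance to inherit it.
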
<div class="HOEnZb"><div class="h5"><br>
> + if (!vma_priv(&vma->vma))<br>
> + continue;<br>
> +<br>
> + if (vma->vma.flags & MAP_GROWSDOWN) {<br>
> + void *addr = (void *) vma_premmaped_start(&vma->vma);<br>
> +<br>
> + if (munmap(addr - PAGE_SIZE, PAGE_SIZE)) {<br>
> + pr_perror("Can't unmap guard page\n");<br>
> + return -1;<br>
> + }<br>
> + }<br>
> + }<br>
> +<br>
> + return 0;<br>
> +}<br>
> +<br>
> static int open_vmas(int pid)<br>
> {<br>
> struct vma_area *vma;<br>
> @@ -1184,6 +1228,8 @@ static int restore_task_with_children(void *_arg)<br>
> if (create_children_and_session())<br>
> exit(1);<br>
><br>
> + if (unmap_guard_pages())<br>
> + exit(1);<br>
> /*<br>
> * Unlike sessions, process groups (a.k.a. pgids) can be joined<br>
> * by any task, provided the task with pid == pgid (group leader)<br>
><br>
<br>
<br>
</div></div></blockquote></div><br></div></div>