[CRIU] [PATCH cr 07/16] restore: map private vma-s before forking children
Andrey Vagin
avagin at openvz.org
Tue Oct 23 06:02:19 EDT 2012
In this case private vma-s will be inherited by children,
which allows restoring copy-on-write regions.

This code compares the child and parent vma lists. If it finds
two vma-s with the same start and end addresses, it decides
that the child inherits this vma from the parent.

This code calculates the total size of all private vma-s, then allocates
one memory region big enough for all of them and maps them one by one.
If a vma is inherited, it is remapped into its allocated place.

As a result all vma-s end up in a contiguous memory region,
sorted by start address. This layout will later be used to remap
the vma-s to their correct addresses.
Signed-off-by: Andrey Vagin <avagin at openvz.org>
---
cr-restore.c | 104 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 103 insertions(+), 1 deletions(-)
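
For readers less familiar with the mmap/mremap dance described above, here is a
minimal standalone sketch of the reserve-then-place technique the patch relies on:
a PROT_NONE reservation is carved out, a brand new private mapping goes into it
with MAP_FIXED, and an already existing ("inherited") mapping is moved into the
reserved area with MREMAP_FIXED | MREMAP_MAYMOVE, keeping its contents. This is
not the patch code itself; the variable names, sizes and error handling are
illustrative only.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Pretend this mapping was inherited from the parent (a COW vma). */
	char *inherited = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (inherited == MAP_FAILED)
		return 1;
	strcpy(inherited, "cow data");

	/* Reserve one contiguous region for two vma-s, as read_vmas() does. */
	char *area = mmap(NULL, 2 * page, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED)
		return 1;

	/* A vma that is not inherited is mapped straight into the reservation. */
	if (mmap(area, page, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
		return 1;

	/* An inherited vma is moved into place; mremap() relocates the pages,
	 * so their contents are preserved at the new address. */
	if (mremap(inherited, page, page,
		   MREMAP_FIXED | MREMAP_MAYMOVE, area + page) == MAP_FAILED)
		return 1;

	printf("moved mapping still holds: %s\n", area + page);
	return 0;
}
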
diff --git a/cr-restore.c b/cr-restore.c
index 9d1fdea..b11fae3 100644
--- a/cr-restore.c
+++ b/cr-restore.c
@@ -173,15 +173,77 @@ err:
return ret;
}
+/* Map a private vma, if it is not mapped by a parent yet */
+static int map_private_vma(pid_t pid, struct vma_area *vma, void *tgt_addr,
+ struct vma_area **pvma, struct list_head *pvma_list)
+{
+ void *addr;
+ struct vma_area *p = *pvma;
+
+ list_for_each_entry_continue(p, pvma_list, list) {
+ if (p->vma.start > vma->vma.start)
+ break;
+
+ if (p->vma.end == vma->vma.end &&
+ p->vma.start == vma->vma.start) {
+ pr_info("COW 0x%016lx-0x%016lx 0x%016lx vma\n",
+ vma->vma.start, vma->vma.end, vma->vma.pgoff);
+ vma->vma.shmid = vma_premmaped_start(&p->vma);
+ break;
+ }
+
+ }
+
+ *pvma = list_entry(p->list.prev, struct vma_area, list);
+
+ if (!vma_premmaped_start(&vma->vma)) {
+ pr_info("Map 0x%016lx-0x%016lx 0x%016lx vma\n",
+ vma->vma.start, vma->vma.end, vma->vma.pgoff);
+
+ addr = mmap(tgt_addr, vma_entry_len(&vma->vma),
+ vma->vma.prot | PROT_WRITE,
+ vma->vma.flags | MAP_FIXED,
+ vma->vma.fd, vma->vma.pgoff);
+
+ if (addr == MAP_FAILED) {
+ pr_perror("Unable to map ANON_VMA");
+ return -1;
+ }
+ vma->vma.shmid = (unsigned long) addr;
+ } else {
+ addr = mremap((void *)vma_premmaped_start(&vma->vma),
+ vma_area_len(vma), vma_area_len(vma),
+ MREMAP_FIXED | MREMAP_MAYMOVE, tgt_addr);
+ if (addr != tgt_addr) {
+ pr_perror("Unable to remap a private vma");
+ return -1;
+ }
+
+ vma->vma.shmid = (unsigned long) addr;
+
+ list_del(&p->list);
+ xfree(p);
+ }
+
+ if (vma_entry_is(&vma->vma, VMA_FILE_PRIVATE))
+ close(vma->vma.fd);
+
+ return 0;
+}
+
static int read_vmas(int pid)
{
int fd, ret = 0;
LIST_HEAD(old);
- struct vma_area *vma;
+ struct vma_area *pvma, *vma;
+ unsigned long priv_size = 0;
+ void *addr;
list_replace_init(&vma_list, &old);
INIT_LIST_HEAD(&vma_list);
+ pvma = list_first_entry(&old, struct vma_area, list);
+
/* Skip errors, because a zombie doesn't have an image of vmas */
fd = open_image_ro(CR_FD_VMAS, pid);
if (fd < 0)
@@ -211,14 +273,54 @@ static int read_vmas(int pid)
vma->vma = *e;
vma_entry__free_unpacked(e, NULL);
+
+ if (!vma_priv(&vma->vma))
+ continue;
+
+ priv_size += vma_area_len(vma);
+ }
+
+ /* Reserve a place for mapping private vma-s one by one */
+ addr = mmap(NULL, priv_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ if (addr == MAP_FAILED) {
+ pr_perror("Unable to reserve memory");
+ return -1;
+ }
+
+ list_for_each_entry(vma, &vma_list, list) {
+ if (!vma_priv(&vma->vma))
+ continue;
+
+ ret = map_private_vma(pid, vma, addr, &pvma, &old);
+ if (ret < 0)
+ break;
+
+ addr += vma_area_len(vma);
}
close(fd);
out:
while (!list_empty(&old)) {
+ unsigned long len;
+
vma = list_first_entry(&old, struct vma_area, list);
list_del(&vma->list);
+
+ if (!vma_priv(&vma->vma))
+ continue;
+
+ addr = (void *) vma_premmaped_start(&vma->vma);
+ len = vma_area_len(vma);
+
+ if (!addr)
+ continue;
+
+ if (munmap(addr, len)) {
+ pr_perror("Unable to unmap %p-%p", addr, addr + len);
+ return -1;
+ }
+
xfree(vma);
}
--
1.7.1