[CRIU] [RFC] shmem: Swap out anonymous shared pages before reading pagemap
Cyrill Gorcunov
gorcunov at gmail.com
Fri Sep 16 06:17:47 PDT 2016
When pages are swapped out we can't detect their presence with mincore
or pagemap. Instead, let's do a walk over the page range and read the first byte,
calling the swap out procedure, and then use pagemap to filter out clean pages.
https://jira.sw.ru/browse/PSBM-52138
Suggested-by: Andrei Vagin <avagin at openvz.org>
Signed-off-by: Cyrill Gorcunov <gorcunov at virtuozzo.com>
---
criu/include/shmem.h | 1 +
criu/mem.c | 5 +++++
criu/shmem.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
3 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/criu/include/shmem.h b/criu/include/shmem.h
index 11edccb..ec3592f 100644
--- a/criu/include/shmem.h
+++ b/criu/include/shmem.h
@@ -12,6 +12,7 @@ extern int collect_shmem(int pid, struct vma_area *vma);
extern int collect_sysv_shmem(unsigned long shmid, unsigned long size);
extern int cr_dump_shmem(void);
extern int add_shmem_area(pid_t pid, VmaEntry *vma, u64 *map);
+extern int shmem_swapout(pid_t pid, VmaEntry *vma);
extern int fixup_sysv_shmems(void);
#define SYSV_SHMEM_SKIP_FD (0x7fffffff)
diff --git a/criu/mem.c b/criu/mem.c
index ecaaa5b..6ec85a5 100644
--- a/criu/mem.c
+++ b/criu/mem.c
@@ -328,6 +328,11 @@ static int __parasite_dump_pages_seized(struct parasite_ctl *ctl,
has_parent = false;
}
+ ret = -1;
+ if (vma_area_is(vma_area, VMA_ANON_SHARED) &&
+ shmem_swapout(ctl->pid.real, vma_area->e))
+ goto out_xfer;
+
map = pmc_get_map(&pmc, vma_area);
if (!map)
goto out_xfer;
diff --git a/criu/shmem.c b/criu/shmem.c
index 5c31576..3edfbff 100644
--- a/criu/shmem.c
+++ b/criu/shmem.c
@@ -600,12 +600,46 @@ static int dump_pages(struct page_pipe *pp, struct page_xfer *xfer, void *addr)
return page_xfer_dump_pages(xfer, pp, (unsigned long)addr);
}
+/*
+ * Walk over the memory range causing pages to swap out
+ * if they were swapped in, otherwise we won't be
+ * able to detect if a swapped page needs to be dumped.
+ * mincore won't help (see mincore_page in kernel code).
+ */
+int shmem_swapout(pid_t pid, VmaEntry *vma)
+{
+ unsigned long size = vma->pgoff + (vma->end - vma->start);
+ unsigned long pfn, nrpages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ void *addr;
+ int fd;
+
+ fd = open_proc(pid, "map_files/%lx-%lx", vma->start, vma->end);
+ if (fd < 0)
+ return -1;
+
+ addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
+ close(fd);
+
+ if (addr == MAP_FAILED) {
+ pr_err("Can't map shmem %#lx (0x%lx-0x%lx)\n",
+ vma->shmid, vma->start, vma->end);
+ return -1;
+ }
+
+ for (pfn = 0; pfn < nrpages; pfn++) {
+ unsigned char v = *(char *)((unsigned long)addr + pfn * PAGE_SIZE);
+ (void)v;
+ }
+
+ munmap(addr, size);
+ return 0;
+}
+
static int dump_one_shmem(struct shmem_info *si)
{
struct page_pipe *pp;
struct page_xfer xfer;
int err, ret = -1, fd;
- unsigned char *mc_map = NULL;
void *addr = NULL;
unsigned long pfn, nrpages;
@@ -613,24 +647,17 @@ static int dump_one_shmem(struct shmem_info *si)
fd = open_proc(si->pid, "map_files/%lx-%lx", si->start, si->end);
if (fd < 0)
- goto err;
+ return -1;
addr = mmap(NULL, si->size, PROT_READ, MAP_SHARED, fd, 0);
close(fd);
if (addr == MAP_FAILED) {
pr_err("Can't map shmem 0x%lx (0x%lx-0x%lx)\n",
si->shmid, si->start, si->end);
- goto err;
+ return -1;
}
nrpages = (si->size + PAGE_SIZE - 1) / PAGE_SIZE;
- mc_map = xmalloc(nrpages * sizeof(*mc_map));
- if (!mc_map)
- goto err_unmap;
- /* We can't rely only on PME bits for anon shmem */
- err = mincore(addr, si->size, mc_map);
- if (err)
- goto err_unmap;
pp = create_page_pipe((nrpages + 1) / 2, NULL, PP_CHUNK_MODE);
if (!pp)
@@ -645,7 +672,7 @@ static int dump_one_shmem(struct shmem_info *si)
unsigned long pgaddr;
pgstate = get_pstate(si->pstate_map, pfn);
- if ((pgstate == PST_DONT_DUMP) && !(mc_map[pfn] & PAGE_RSS))
+ if (pgstate == PST_DONT_DUMP)
continue;
pgaddr = (unsigned long)addr + pfn * PAGE_SIZE;
@@ -673,8 +700,6 @@ err_pp:
destroy_page_pipe(pp);
err_unmap:
munmap(addr, si->size);
-err:
- xfree(mc_map);
return ret;
}
--
2.7.4
More information about the CRIU
mailing list