[CRIU] [PATCH 04/11] mem: Shuffle page-read around
Pavel Emelyanov
xemul at virtuozzo.com
Fri May 5 09:02:30 PDT 2017
The page-read will be needed during the premap stage, so open it once in prepare_mappings() and pass the handle down to premap_priv_vmas() and restore_priv_vma_content() instead of opening it in the latter.
Signed-off-by: Pavel Emelyanov <xemul at virtuozzo.com>
---
criu/mem.c | 45 ++++++++++++++++++++++++++-------------------
1 file changed, 26 insertions(+), 19 deletions(-)
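Note: the net effect of the patch is that the page-read handle is opened once
by the caller and threaded through both the premap and content-restore phases,
with a reset in between. The following is a minimal, self-contained C sketch of
that handle-passing pattern; the struct layout and helper bodies here are
simplified stand-ins to make the sketch compile on its own, not CRIU's real
page_read API.

#include <stdio.h>

/* Simplified stand-in for CRIU's struct page_read: a stateful
 * handle with function-pointer ops, shared by both phases. */
struct page_read {
	int pos;			/* current iovec index (toy state) */
	int (*advance)(struct page_read *);
	void (*reset)(struct page_read *);
	void (*close)(struct page_read *);
};

/* Toy ops: pretend the image holds three iovecs. */
static int pr_advance(struct page_read *pr) { return ++pr->pos <= 3; }
static void pr_reset(struct page_read *pr) { pr->pos = 0; }
static void pr_close(struct page_read *pr) { pr->pos = -1; }

static int open_page_read(struct page_read *pr)
{
	pr->pos = 0;
	pr->advance = pr_advance;
	pr->reset = pr_reset;
	pr->close = pr_close;
	return 1;	/* > 0 on success, mirroring the patch's check */
}

/* After the patch, both phases take the handle as an argument
 * instead of restore_priv_vma_content() opening its own. */
static int premap_priv_vmas(struct page_read *pr)
{
	printf("premap: peeking at iovec %d\n", pr->pos);
	return 0;
}

static int restore_priv_vma_content(struct page_read *pr)
{
	while (pr->advance(pr))
		printf("restore: reading iovec %d\n", pr->pos);
	pr->close(pr);		/* the helper closes it, as in the patch */
	return 0;
}

int main(void)
{
	struct page_read pr;

	if (open_page_read(&pr) <= 0)
		return 1;

	pr.advance(&pr);	/* shift to the 1st iovec */
	if (premap_priv_vmas(&pr))
		return 1;

	pr.reset(&pr);		/* rewind before the content pass */
	return restore_priv_vma_content(&pr);
}

The design point is that the caller now owns the handle's setup: open once in
prepare_mappings(), advance for the premap peek, reset before the full content
pass, while restore_priv_vma_content() still closes it on both the normal and
check_only paths.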
diff --git a/criu/mem.c b/criu/mem.c
index 3bcf467..c20528e 100644
--- a/criu/mem.c
+++ b/criu/mem.c
@@ -674,7 +674,8 @@ static int map_private_vma(struct pstree_item *t,
return 0;
}
-static int premap_priv_vmas(struct pstree_item *t, struct vm_area_list *vmas, void *at)
+static int premap_priv_vmas(struct pstree_item *t, struct vm_area_list *vmas,
+ void *at, struct page_read *pr)
{
struct list_head *parent_vmas;
struct vma_area *pvma, *vma;
@@ -712,7 +713,7 @@ static int premap_priv_vmas(struct pstree_item *t, struct vm_area_list *vmas, vo
return ret;
}
-static int restore_priv_vma_content(struct pstree_item *t)
+static int restore_priv_vma_content(struct pstree_item *t, struct page_read *pr)
{
struct vma_area *vma;
int ret = 0;
@@ -724,37 +725,34 @@ static int restore_priv_vma_content(struct pstree_item *t)
unsigned int nr_compared = 0;
unsigned int nr_lazy = 0;
unsigned long va;
- struct page_read pr;
- if (opts.check_only)
+ if (opts.check_only) {
+ pr->close(pr);
return 0;
+ }
vma = list_first_entry(vmas, struct vma_area, list);
- ret = open_page_read(vpid(t), &pr, PR_TASK);
- if (ret <= 0)
- return -1;
-
/*
* Read page contents.
*/
while (1) {
unsigned long off, i, nr_pages;
- ret = pr.advance(&pr);
+ ret = pr->advance(pr);
if (ret <= 0)
break;
- va = (unsigned long)decode_pointer(pr.pe->vaddr);
- nr_pages = pr.pe->nr_pages;
+ va = (unsigned long)decode_pointer(pr->pe->vaddr);
+ nr_pages = pr->pe->nr_pages;
/*
* This means that userfaultfd is used to load the pages
* on demand.
*/
- if (opts.lazy_pages && pagemap_lazy(pr.pe)) {
+ if (opts.lazy_pages && pagemap_lazy(pr->pe)) {
pr_debug("Lazy restore skips %ld pages at %lx\n", nr_pages, va);
- pr.skip_pages(&pr, nr_pages * PAGE_SIZE);
+ pr->skip_pages(pr, nr_pages * PAGE_SIZE);
nr_lazy += nr_pages;
continue;
}
@@ -794,7 +792,7 @@ static int restore_priv_vma_content(struct pstree_item *t)
if (vma->ppage_bitmap) { /* inherited vma */
clear_bit(off, vma->ppage_bitmap);
- ret = pr.read_pages(&pr, va, 1, buf, 0);
+ ret = pr->read_pages(pr, va, 1, buf, 0);
if (ret < 0)
goto err_read;
@@ -822,7 +820,7 @@ static int restore_priv_vma_content(struct pstree_item *t)
nr = min_t(int, nr_pages - i, (vma->e->end - va) / PAGE_SIZE);
- ret = pr.read_pages(&pr, va, nr, p, PR_ASYNC);
+ ret = pr->read_pages(pr, va, nr, p, PR_ASYNC);
if (ret < 0)
goto err_read;
@@ -837,10 +835,10 @@ static int restore_priv_vma_content(struct pstree_item *t)
}
err_read:
- if (pr.sync(&pr))
+ if (pr->sync(pr))
return -1;
- pr.close(&pr);
+ pr->close(pr);
if (ret < 0)
return ret;
@@ -893,6 +891,7 @@ int prepare_mappings(struct pstree_item *t)
int ret = 0;
void *addr;
struct vm_area_list *vmas;
+ struct page_read pr;
void *old_premmapped_addr = NULL;
unsigned long old_premmapped_len;
@@ -914,11 +913,19 @@ int prepare_mappings(struct pstree_item *t)
rsti(t)->premmapped_addr = addr;
rsti(t)->premmapped_len = vmas->priv_size;
- ret = premap_priv_vmas(t, vmas, addr);
+ ret = open_page_read(vpid(t), &pr, PR_TASK);
+ if (ret <= 0)
+ return -1;
+
+ pr.advance(&pr); /* shift to the 1st iovec */
+
+ ret = premap_priv_vmas(t, vmas, addr, &pr);
if (ret < 0)
goto out;
- ret = restore_priv_vma_content(t);
+ pr.reset(&pr);
+
+ ret = restore_priv_vma_content(t, &pr);
if (ret < 0)
goto out;
--
2.5.5