[CRIU] [PATCH 03/10] uffd: Move loop into collect_uffd_pages
Pavel Emelyanov
xemul at virtuozzo.com
Fri Nov 11 21:24:04 PST 2016
It just makes the code look cleaner and allows further
improvements.
Signed-off-by: Pavel Emelyanov <xemul at virtuozzo.com>
---
criu/uffd.c | 80 ++++++++++++++++++++++++++++++-------------------------------
1 file changed, 40 insertions(+), 40 deletions(-)
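
For reference, a rough outline of how collect_uffd_pages() reads after this
change (local declarations and the per-page VMA scan elided, see the diff
below for the full picture):

	ps = page_size();
	vmas = &rsti(item)->vmas;

	while (1) {
		rc = pr->get_pagemap(pr, &iov);
		if (rc <= 0)
			break;

		/* walk the iov page by page and queue lazy pages onto lpi->pages */
	}

	return 0;

Since the function now consumes all pagemap entries itself, it returns 0 on
success instead of 1, and find_vmas() calls it once instead of looping on its
return value.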
diff --git a/criu/uffd.c b/criu/uffd.c
index 4ee37fa..5c34007 100644
--- a/criu/uffd.c
+++ b/criu/uffd.c
@@ -497,53 +497,55 @@ static int collect_uffd_pages(struct page_read *pr, struct lazy_pages_info *lpi)
 
 	BUG_ON(!item);
 
+	ps = page_size();
 	vmas = &rsti(item)->vmas;
 
-	rc = pr->get_pagemap(pr, &iov);
-	if (rc <= 0)
-		return 0;
+	while (1) {
+		rc = pr->get_pagemap(pr, &iov);
+		if (rc <= 0)
+			break;
 
-	ps = page_size();
-	nr_pages = iov.iov_len / ps;
-	base = (unsigned long) iov.iov_base;
-	pr_debug("iov.iov_base 0x%lx (%ld pages)\n", base, nr_pages);
+		nr_pages = iov.iov_len / ps;
+		base = (unsigned long) iov.iov_base;
+		pr_debug("iov.iov_base 0x%lx (%ld pages)\n", base, nr_pages);
 
-	for (i = 0; i < nr_pages; i++) {
-		bool uffd_page = false;
-		base = (unsigned long) iov.iov_base + (i * ps);
-		/*
-		 * Only pages which are MAP_ANONYMOUS and MAP_PRIVATE
-		 * are relevant for userfaultfd handling.
-		 * Loop over all VMAs to see if the flags matching.
-		 */
-		list_for_each_entry(vma, &vmas->h, list) {
+		for (i = 0; i < nr_pages; i++) {
+			bool uffd_page = false;
+			base = (unsigned long) iov.iov_base + (i * ps);
 			/*
-			 * This loop assumes that base can actually be found
-			 * in the VMA list.
+			 * Only pages which are MAP_ANONYMOUS and MAP_PRIVATE
+			 * are relevant for userfaultfd handling.
+			 * Loop over all VMAs to see if the flags matching.
 			 */
-			if (base >= vma->e->start && base < vma->e->end) {
-				if (vma_entry_can_be_lazy(vma->e)) {
-					if(!pagemap_in_parent(pr->pe))
-						uffd_page = true;
-					break;
+			list_for_each_entry(vma, &vmas->h, list) {
+				/*
+				 * This loop assumes that base can actually be found
+				 * in the VMA list.
+				 */
+				if (base >= vma->e->start && base < vma->e->end) {
+					if (vma_entry_can_be_lazy(vma->e)) {
+						if(!pagemap_in_parent(pr->pe))
+							uffd_page = true;
+						break;
+					}
 				}
 			}
-		}
 
-		/* This is not a page we are looking for. Move along */
-		if (!uffd_page)
-			continue;
+			/* This is not a page we are looking for. Move along */
+			if (!uffd_page)
+				continue;
 
-		pr_debug("Adding 0x%lx to our list\n", base);
+			pr_debug("Adding 0x%lx to our list\n", base);
 
-		uffd_pages = xzalloc(sizeof(struct uffd_pages_struct));
-		if (!uffd_pages)
-			return -1;
-		uffd_pages->addr = base;
-		list_add(&uffd_pages->list, &lpi->pages);
+			uffd_pages = xzalloc(sizeof(struct uffd_pages_struct));
+			if (!uffd_pages)
+				return -1;
+			uffd_pages->addr = base;
+			list_add(&uffd_pages->list, &lpi->pages);
+		}
 	}
 
-	return 1;
+	return 0;
 }
 
 static int handle_remaining_pages(struct lazy_pages_info *lpi, void *dest)
@@ -660,12 +662,10 @@ static int find_vmas(struct lazy_pages_info *lpi)
 	 * a page has already been transferred or if it needs to be
 	 * pushed into the process using userfaultfd.
 	 */
-	do {
-		ret = collect_uffd_pages(&lpi->pr, lpi);
-		if (ret == -1) {
-			goto out;
-		}
-	} while (ret);
+	ret = collect_uffd_pages(&lpi->pr, lpi);
+	if (ret == -1) {
+		goto out;
+	}
 
 	/* Count detected pages */
 	list_for_each_entry(uffd_pages, &lpi->pages, list)
--
2.5.0