[CRIU] [PATCH 2/2] lazy-pages: handle_remaining_pages: enable asynchronous reads

Mike Rapoport rppt at linux.vnet.ibm.com
Thu Jun 15 11:08:37 MSK 2017


Until now, once we had started to fetch an iovec, we waited until it was
completely copied before returning to the event processing loop. Now we
can have several requests for the remote pages in flight.

Signed-off-by: Mike Rapoport <rppt at linux.vnet.ibm.com>
---
 criu/uffd.c | 44 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 40 insertions(+), 4 deletions(-)

diff --git a/criu/uffd.c b/criu/uffd.c
index 84fd07b..591cab2 100644
--- a/criu/uffd.c
+++ b/criu/uffd.c
@@ -63,6 +63,7 @@ struct lazy_iov {
 	unsigned long base;	/* run-time start address, tracks remaps */
 	unsigned long img_base;	/* start address at the dump time */
 	unsigned long len;
+	bool queued;
 };
 
 struct lp_req {
@@ -84,6 +85,7 @@ struct lazy_pages_info {
 
 	unsigned long total_pages;
 	unsigned long copied_pages;
+	bool iovs_pending;
 
 	struct epoll_rfd lpfd;
 
@@ -113,6 +115,7 @@ static struct lazy_pages_info *lpi_init(void)
 	INIT_LIST_HEAD(&lpi->reqs);
 	INIT_LIST_HEAD(&lpi->l);
 	lpi->lpfd.revent = handle_uffd_event;
+	lpi->iovs_pending = true;
 
 	return lpi;
 }
@@ -803,13 +806,43 @@ static int uffd_handle_pages(struct lazy_pages_info *lpi, __u64 address, int nr,
 	return 0;
 }
 
+static struct lazy_iov *first_pending_iov(struct lazy_pages_info *lpi)
+{
+	struct lazy_iov *iov;
+
+	list_for_each_entry(iov, &lpi->iovs, l)
+		if (!iov->queued)
+			return iov;
+
+	return NULL;
+}
+
+static bool is_iov_queued(struct lazy_pages_info *lpi, struct lazy_iov *iov)
+{
+	struct lp_req *req;
+
+	list_for_each_entry(req, &lpi->reqs, l)
+		if (req->addr >= iov->base && req->addr < iov->base + iov->len)
+			return true;
+
+	return false;
+}
+
 static int handle_remaining_pages(struct lazy_pages_info *lpi)
 {
 	struct lazy_iov *iov;
 	struct lp_req *req;
 	int nr_pages, err;
 
-	iov = list_first_entry(&lpi->iovs, struct lazy_iov, l);
+	iov = first_pending_iov(lpi);
+	if (!iov) {
+		lpi->iovs_pending = false;
+		return 0;
+	}
+
+	if (is_iov_queued(lpi, iov))
+		return 0;
+
 	nr_pages = iov->len / PAGE_SIZE;
 
 	req = xzalloc(sizeof(*req));
@@ -820,8 +853,9 @@ static int handle_remaining_pages(struct lazy_pages_info *lpi)
 	req->img_addr = iov->img_base;
 	req->len = iov->len;
 	list_add(&req->l, &lpi->reqs);
+	iov->queued = true;
 
-	err = uffd_handle_pages(lpi, req->img_addr, nr_pages, 0);
+	err = uffd_handle_pages(lpi, req->img_addr, nr_pages, PR_ASYNC | PR_ASAP);
 	if (err < 0) {
 		lp_err(lpi, "Error during UFFD copy\n");
 		return -1;
@@ -1048,8 +1082,10 @@ static int handle_requests(int epollfd, struct epoll_event *events, int nr_fds)
 
 		poll_timeout = 0;
 		list_for_each_entry(lpi, &lpis, l) {
-			if (!list_empty(&lpi->iovs)) {
-				remaining = true;
+			if (list_empty(&lpi->iovs))
+				continue;
+			remaining = true;
+			if (lpi->iovs_pending) {
 				ret = handle_remaining_pages(lpi);
 				if (ret < 0)
 					goto out;
-- 
2.7.4



More information about the CRIU mailing list