[Devel] [PATCH RH9 04/12] dm-qcow2: Prepare handle_md_page() to be called not only from main kwork

Kirill Tkhai ktkhai at virtuozzo.com
Wed Jan 19 19:17:15 MSK 2022


A parallel handle_md_page() may fail because the page has just been
added by another caller. Teach it to repeat the search in that case,
since the page it needs is already in the tree.

Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 drivers/md/dm-qcow2-map.c    |    7 +++----
 drivers/md/dm-qcow2-target.c |   15 ++++++++++-----
 2 files changed, 13 insertions(+), 9 deletions(-)
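
For illustration, a minimal sketch of the lookup-or-insert retry loop
this patch prepares handle_md_page() for. This is not the driver's
actual code: handle_md_page_sketch() is a hypothetical wrapper, and the
helper signatures are approximated from the hunks below.

static int handle_md_page_sketch(struct qcow2 *qcow2, u64 page_id,
				 struct qio **qio, struct md_page **ret_md)
{
	struct md_page *md;
	int ret;

	while (1) {
		/* Fast path: the page may already be cached in memory. */
		md = md_page_find_or_postpone(qcow2, page_id, qio);
		if (md) {
			*ret_md = md;
			return 0;
		}

		/*
		 * Slow path: allocate the page and link it into the
		 * rb-tree.  When two contexts race here, exactly one
		 * insert succeeds; the loser now sees -EEXIST instead
		 * of hitting BUG() and simply repeats the search,
		 * finding the winner's page on the next iteration.
		 */
		ret = alloc_and_insert_md_page(qcow2, page_id, &md);
		if (ret == -EEXIST)
			continue;	/* lost the race: repeat the search */
		if (ret < 0)
			return ret;
		*ret_md = md;	/* real code first submits a read of the page */
		return 0;
	}
}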

diff --git a/drivers/md/dm-qcow2-map.c b/drivers/md/dm-qcow2-map.c
index b9993cc5e736..17edc7bd7b87 100644
--- a/drivers/md/dm-qcow2-map.c
+++ b/drivers/md/dm-qcow2-map.c
@@ -1524,10 +1524,8 @@ static int submit_read_md_page(struct qcow2 *qcow2, struct qio **qio,
 	int ret;
 
 	ret = alloc_and_insert_md_page(qcow2, page_id, &md);
-	if (ret < 0) {
-		pr_err("Can't alloc: ret=%d, page_id=%llu\n", ret, page_id);
-		return -EIO;
-	}
+	if (ret < 0)
+		return ret;
 
 	spin_lock_irq(&qcow2->md_pages_lock);
 	list_add_tail(&(*qio)->link, &md->wait_list);
@@ -1541,6 +1539,7 @@ static int submit_read_md_page(struct qcow2 *qcow2, struct qio **qio,
 /*
  * This may be called with @qio == NULL, in case we are
  * interested only in searching md cached in memory.
+ * It is intended to be callable not only from the main kwork.
  */
 static int handle_md_page(struct qcow2 *qcow2, u64 page_id,
 		 struct qio **qio, struct md_page **ret_md)
diff --git a/drivers/md/dm-qcow2-target.c b/drivers/md/dm-qcow2-target.c
index 2a29bff42bfe..9e82189312e3 100644
--- a/drivers/md/dm-qcow2-target.c
+++ b/drivers/md/dm-qcow2-target.c
@@ -310,7 +310,7 @@ struct md_page *md_page_find_or_postpone(struct qcow2 *qcow2, unsigned int id,
 	return md;
 }
 
-static void md_page_insert(struct qcow2 *qcow2, struct md_page *new_md)
+static int md_page_try_insert(struct qcow2 *qcow2, struct md_page *new_md)
 {
 	struct rb_root *root = &qcow2->md_pages;
 	unsigned int new_id = new_md->id;
@@ -329,11 +329,12 @@ static void md_page_insert(struct qcow2 *qcow2, struct md_page *new_md)
 		else if (new_id > md->id)
 			node = &parent->rb_right;
 		else
-			BUG();
+			return -EEXIST;
 	}
 
 	rb_link_node(&new_md->node, parent, node);
 	rb_insert_color(&new_md->node, root);
+	return 0;
 }
 
 void md_page_erase(struct qcow2 *qcow2, struct md_page *md)
@@ -353,7 +354,8 @@ struct md_page *md_page_renumber(struct qcow2 *qcow2, unsigned int id,
 		WARN_ON_ONCE(!list_empty(&md->wait_list));
 		md_page_erase(qcow2, md);
 		md->id = new_id;
-		md_page_insert(qcow2, md);
+		if (WARN_ON(md_page_try_insert(qcow2, md) < 0))
+			md = NULL;
 	}
 	return md;
 }
@@ -388,10 +390,13 @@ int alloc_and_insert_md_page(struct qcow2 *qcow2, u64 index, struct md_page **md
 	INIT_LIST_HEAD(&(*md)->wb_link);
 
 	spin_lock_irq(&qcow2->md_pages_lock);
-	md_page_insert(qcow2, *md);
+	ret = md_page_try_insert(qcow2, *md);
 	spin_unlock_irq(&qcow2->md_pages_lock);
+	if (ret)
+		goto err_putpage;
 	return 0;
-
+err_putpage:
+	put_page((*md)->page);
 err_kfree:
 	kfree(*md);
 	return ret;
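
As a usage note, the loser's unwind mirrors the allocation order in
reverse, per the err_putpage/err_kfree labels above. A hedged fragment
of what a caller of md_page_try_insert() is expected to do (the retry
itself lives in handle_md_page() and is not part of this hunk):

	spin_lock_irq(&qcow2->md_pages_lock);
	ret = md_page_try_insert(qcow2, new_md);   /* -EEXIST if we lost */
	spin_unlock_irq(&qcow2->md_pages_lock);
	if (ret) {
		put_page(new_md->page);	/* release the page we allocated */
		kfree(new_md);		/* free the md struct itself */
		return ret;	/* on -EEXIST the caller repeats the search */
	}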



