[Devel] [PATCH RH9 4/9] dm-qcow2: Prepare handle_md_page() for calling not only from main kwork
Kirill Tkhai
ktkhai at virtuozzo.com
Sun Mar 6 13:28:16 MSK 2022
Parallel calls to handle_md_page() may fail because a page has just been added by another caller.
Teach it to repeat the search in that case.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
drivers/md/dm-qcow2-map.c | 5 ++---
drivers/md/dm-qcow2-target.c | 14 ++++++++++----
2 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-qcow2-map.c b/drivers/md/dm-qcow2-map.c
index 4e04505810fc..4edd63e47a3d 100644
--- a/drivers/md/dm-qcow2-map.c
+++ b/drivers/md/dm-qcow2-map.c
@@ -1526,10 +1526,8 @@ static int submit_read_md_page(struct qcow2 *qcow2, struct qio **qio,
int ret;
ret = alloc_and_insert_md_page(qcow2, page_id, &md);
- if (ret < 0) {
- pr_err("Can't alloc: ret=%d, page_id=%llu\n", ret, page_id);
+ if (ret < 0)
return ret;
- }
spin_lock_irq(&qcow2->md_pages_lock);
list_add_tail(&(*qio)->link, &md->wait_list);
@@ -1543,6 +1541,7 @@ static int submit_read_md_page(struct qcow2 *qcow2, struct qio **qio,
/*
* This may be called with @qio == NULL, in case of we are
* interesting in searching cached in memory md only.
+ * This is aimed to be called not only from main kwork.
*/
static int handle_md_page(struct qcow2 *qcow2, u64 page_id,
struct qio **qio, struct md_page **ret_md)
diff --git a/drivers/md/dm-qcow2-target.c b/drivers/md/dm-qcow2-target.c
index 6c550cbe2579..795d64516507 100644
--- a/drivers/md/dm-qcow2-target.c
+++ b/drivers/md/dm-qcow2-target.c
@@ -318,7 +318,7 @@ struct md_page *md_page_find_or_postpone(struct qcow2 *qcow2, unsigned int id,
return md;
}
-static void md_page_insert(struct qcow2 *qcow2, struct md_page *new_md)
+static int md_page_try_insert(struct qcow2 *qcow2, struct md_page *new_md)
{
struct rb_root *root = &qcow2->md_pages;
unsigned int new_id = new_md->id;
@@ -337,11 +337,12 @@ static void md_page_insert(struct qcow2 *qcow2, struct md_page *new_md)
else if (new_id > md->id)
node = &parent->rb_right;
else
- BUG();
+ return -EEXIST;
}
rb_link_node(&new_md->node, parent, node);
rb_insert_color(&new_md->node, root);
+ return 0;
}
void md_page_erase(struct qcow2 *qcow2, struct md_page *md)
@@ -361,7 +362,8 @@ struct md_page *md_page_renumber(struct qcow2 *qcow2, unsigned int id,
WARN_ON_ONCE(!list_empty(&md->wait_list));
md_page_erase(qcow2, md);
md->id = new_id;
- md_page_insert(qcow2, md);
+ if (WARN_ON(md_page_try_insert(qcow2, md) < 0))
+ md = NULL;
}
return md;
}
@@ -396,10 +398,14 @@ int alloc_and_insert_md_page(struct qcow2 *qcow2, u64 index, struct md_page **md
INIT_LIST_HEAD(&(*md)->wb_link);
spin_lock_irq(&qcow2->md_pages_lock);
- md_page_insert(qcow2, *md);
+ ret = md_page_try_insert(qcow2, *md);
spin_unlock_irq(&qcow2->md_pages_lock);
+ if (ret)
+ goto err_putpage;
return 0;
+err_putpage:
+ put_page((*md)->page);
err_kfree:
kfree(*md);
return ret;
More information about the Devel
mailing list