[Devel] [RFC PATCH vz9 v6 58/62] dm-ploop: fix locking and improve error handling when submitting pios
Alexander Atanasov
alexander.atanasov at virtuozzo.com
Fri Dec 6 00:56:31 MSK 2024
The difference between hlist_unhashed_lockless and hlist_unhashed
is that the _lockless version uses READ_ONCE to perform the check.
Since the check is done without holding a lock, we must switch to the _lockless variant.
Also make cluster locking and insertion into inflight_pios return a result
so we can track whether they failed, and where.
https://virtuozzo.atlassian.net/browse/VSTOR-91821
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
---
drivers/md/dm-ploop-map.c | 70 ++++++++++++++++++++++++++++++---------
1 file changed, 54 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 55ed95e39601..a03e1af3fd87 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -496,17 +496,32 @@ static void ploop_dec_nr_inflight(struct ploop *ploop, struct pio *pio)
}
}
-static void ploop_link_pio(struct hlist_head head[], struct pio *pio,
+static int ploop_link_pio(struct hlist_head head[], struct pio *pio,
u32 clu, bool exclusive)
{
struct hlist_head *slot = ploop_htable_slot(head, clu);
+ struct ploop *ploop = pio->ploop;
+
+ if (exclusive) {
+ struct pio *pe = ploop_find_pio(head, clu);
+
+ if (pe == pio)
+ return 1;
- if (exclusive)
- WARN_ON_ONCE(ploop_find_pio(head, clu) != NULL);
+ WARN_ON_ONCE(pe != NULL);
- BUG_ON(!hlist_unhashed(&pio->hlist_node));
+ if (pe)
+ PL_ERR("clu:%u already exclusively locked\n", clu);
+ }
+
+ if (!hlist_unhashed_lockless(&pio->hlist_node)) {
+ PL_ERR("already hashed clu:%u e:%d\n", clu, exclusive);
+ WARN_ON_ONCE(1);
+ return 0;
+ }
hlist_add_head(&pio->hlist_node, slot);
pio->clu = clu;
+ return 1;
}
/*
@@ -514,45 +529,55 @@ static void ploop_link_pio(struct hlist_head head[], struct pio *pio,
* or from exclusive_bios_rbtree. BIOs from endio_list are requeued
* to deferred_list.
*/
-static void ploop_unlink_pio(struct ploop *ploop, struct pio *pio,
+static int ploop_unlink_pio(struct ploop *ploop, struct pio *pio,
struct list_head *pio_list)
{
- BUG_ON(hlist_unhashed(&pio->hlist_node));
+ if (hlist_unhashed_lockless(&pio->hlist_node)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
hlist_del_init(&pio->hlist_node);
list_splice_tail_init(&pio->endio_list, pio_list);
+ return 1;
}
-static void ploop_add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 clu)
+static int ploop_add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 clu)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&ploop->deferred_lock, flags);
- ploop_link_pio(ploop->exclusive_pios, pio, clu, true);
+ ret = ploop_link_pio(ploop->exclusive_pios, pio, clu, true);
spin_unlock_irqrestore(&ploop->deferred_lock, flags);
+ return ret;
}
-static void ploop_del_cluster_lk(struct ploop *ploop, struct pio *pio)
+static int ploop_del_cluster_lk(struct ploop *ploop, struct pio *pio)
{
LIST_HEAD(pio_list);
unsigned long flags;
+ int ret;
spin_lock_irqsave(&ploop->deferred_lock, flags);
- ploop_unlink_pio(ploop, pio, &pio_list);
+ ret = ploop_unlink_pio(ploop, pio, &pio_list);
spin_unlock_irqrestore(&ploop->deferred_lock, flags);
if (!list_empty(&pio_list))
ploop_dispatch_pios(ploop, NULL, &pio_list);
+ return ret;
}
-static void ploop_link_submitting_pio(struct ploop *ploop, struct pio *pio,
+static int ploop_link_submitting_pio(struct ploop *ploop, struct pio *pio,
u32 clu)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&ploop->inflight_lock, flags);
- ploop_link_pio(ploop->inflight_pios, pio, clu, false);
+ ret = ploop_link_pio(ploop->inflight_pios, pio, clu, false);
spin_unlock_irqrestore(&ploop->inflight_lock, flags);
+ return ret;
}
static void ploop_unlink_completed_pio(struct ploop *ploop, struct pio *pio)
@@ -560,7 +585,7 @@ static void ploop_unlink_completed_pio(struct ploop *ploop, struct pio *pio)
LIST_HEAD(pio_list);
unsigned long flags;
- if (hlist_unhashed(&pio->hlist_node))
+ if (hlist_unhashed_lockless(&pio->hlist_node))
return;
spin_lock_irqsave(&ploop->inflight_lock, flags);
@@ -705,7 +730,8 @@ static int ploop_handle_discard_pio(struct ploop *ploop, struct pio *pio,
return 0;
}
- ploop_add_cluster_lk(ploop, pio, clu);
+ if (!ploop_add_cluster_lk(ploop, pio, clu))
+ PL_ERR("dis clu %d already locked\n", clu);
pio->wants_discard_index_cleanup = true;
punch_hole:
@@ -1419,7 +1445,8 @@ static int ploop_submit_cluster_cow(struct ploop *ploop, unsigned int level,
cow->aux_pio = aux_pio;
cow->cow_pio = cow_pio;
- ploop_add_cluster_lk(ploop, cow_pio, clu);
+ if (!ploop_add_cluster_lk(ploop, cow_pio, clu))
+ PL_ERR("cowclu %d already locked\n", clu);
/* Stage #0: read secondary delta full clu */
ploop_map_and_submit_rw(ploop, dst_clu, aux_pio, level);
@@ -1699,8 +1726,19 @@ static int ploop_process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
&dst_clu, pio);
if (!ret)
goto executed;
+
queue: /* pio needs to go to the queue */
- ploop_link_submitting_pio(ploop, pio, clu);
+ if (!ploop_link_submitting_pio(ploop, pio, clu)) {
+ PL_ERR("link submitting pio failed\n");
+ /* Pio is put into endio_list */
+ if (ploop_postpone_if_cluster_locked(ploop, pio, clu)) {
+ goto handled;
+ } else {
+ PL_ERR("link submitting pio postpone failed\n");
+ WARN_ON_ONCE(1);
+ /* fall through and run it anyway */
+ }
+ }
ploop_map_and_submit_rw(ploop, dst_clu, pio, ploop_top_level(ploop));
// return 1;
executed: /* pio op started */
--
2.43.0
More information about the Devel
mailing list