[Devel] [PATCH RH8 06/18] ploop: Always link submitted pios
Kirill Tkhai
ktkhai at virtuozzo.com
Wed Jun 16 18:46:47 MSK 2021
We want to simplify ploop_inflight_bios_ref_switch()
and stop calling it from the work handler. So, from now
on always link inflight pios into the hash table.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
drivers/md/dm-ploop-map.c | 66 ++++--------------------------------------
drivers/md/dm-ploop-target.c | 1 -
drivers/md/dm-ploop.h | 7 +---
3 files changed, 7 insertions(+), 67 deletions(-)
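
For illustration only (not part of the patch): after this change every
submitted pio is unconditionally linked into the inflight table on
submission and unlinked on completion, so a discard only has to look the
cluster up instead of first enabling force_rbtree_for_inflight and
waiting for a ref switch. Below is a minimal userspace C sketch of that
pattern; struct fake_pio, the chained hash table and the pthread mutex
are simplified stand-ins for the kernel's struct pio, hlist buckets and
spinlock, not the actual ploop code.

/* Sketch: always link on submit, unlink on completion, lookup on discard. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NBUCKETS 64

struct fake_pio {
	unsigned int cluster;
	struct fake_pio *next;		/* chain inside one bucket */
};

static struct fake_pio *inflight[NBUCKETS];
static pthread_mutex_t inflight_lock = PTHREAD_MUTEX_INITIALIZER;

/* On submission every pio is linked; there is no mode flag to check. */
static void link_submitting_pio(struct fake_pio *pio, unsigned int cluster)
{
	unsigned int b = cluster % NBUCKETS;

	pio->cluster = cluster;
	pthread_mutex_lock(&inflight_lock);
	pio->next = inflight[b];
	inflight[b] = pio;
	pthread_mutex_unlock(&inflight_lock);
}

/* On completion the pio is unlinked again. */
static void unlink_completed_pio(struct fake_pio *pio)
{
	unsigned int b = pio->cluster % NBUCKETS;
	struct fake_pio **p;

	pthread_mutex_lock(&inflight_lock);
	for (p = &inflight[b]; *p; p = &(*p)->next) {
		if (*p == pio) {
			*p = pio->next;
			break;
		}
	}
	pthread_mutex_unlock(&inflight_lock);
}

/* Discard only needs a lookup: is anybody still using this cluster? */
static bool cluster_is_inflight(unsigned int cluster)
{
	struct fake_pio *p;
	bool busy = false;

	pthread_mutex_lock(&inflight_lock);
	for (p = inflight[cluster % NBUCKETS]; p; p = p->next) {
		if (p->cluster == cluster) {
			busy = true;
			break;
		}
	}
	pthread_mutex_unlock(&inflight_lock);
	return busy;
}

int main(void)
{
	struct fake_pio pio;

	link_submitting_pio(&pio, 7);
	printf("cluster 7 busy: %d\n", cluster_is_inflight(7));	/* 1 */
	unlink_completed_pio(&pio);
	printf("cluster 7 busy: %d\n", cluster_is_inflight(7));	/* 0 */
	return 0;
}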
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index efa0da8afe3f..ef37874e8991 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -447,24 +447,21 @@ static void del_cluster_lk(struct ploop *ploop, struct pio *pio)
}
-static void maybe_link_submitting_pio(struct ploop *ploop, struct pio *pio,
- unsigned int cluster)
+static void link_submitting_pio(struct ploop *ploop, struct pio *pio,
+ unsigned int cluster)
{
unsigned long flags;
- if (!ploop->force_rbtree_for_inflight)
- return;
-
spin_lock_irqsave(&ploop->inflight_lock, flags);
link_pio(ploop->inflight_pios, pio, cluster, false);
spin_unlock_irqrestore(&ploop->inflight_lock, flags);
}
-static void maybe_unlink_completed_pio(struct ploop *ploop, struct pio *pio)
+static void unlink_completed_pio(struct ploop *ploop, struct pio *pio)
{
LIST_HEAD(pio_list);
unsigned long flags;
- if (likely(hlist_unhashed(&pio->hlist_node)))
+ if (hlist_unhashed(&pio->hlist_node))
return;
spin_lock_irqsave(&ploop->inflight_lock, flags);
@@ -539,25 +536,6 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
if (ploop->nr_deltas != 1)
goto punch_hole;
- if (!ploop->force_rbtree_for_inflight) {
- /*
- * Force all not exclusive inflight bios to link into
- * inflight_pios_rbtree. Note, that this does not wait
- * completion of two-stages requests (currently, these
- * may be only cow, which take cluster lk, so we are
- * safe with them).
- */
- ploop->force_rbtree_for_inflight = true;
- ret = ploop_inflight_bios_ref_switch(ploop, true);
- if (ret) {
- pr_err_ratelimited("ploop: discard ignored by err=%d\n",
- ret);
- ploop->force_rbtree_for_inflight = false;
- pio->bi_status = BLK_STS_IOERR;
- pio_endio(pio);
- }
- }
-
spin_lock_irqsave(&ploop->inflight_lock, flags);
inflight_h = find_inflight_bio(ploop, cluster);
if (inflight_h)
@@ -571,7 +549,6 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
}
add_cluster_lk(ploop, pio, cluster);
- atomic_inc(&ploop->nr_discard_bios);
pio->wants_discard_index_cleanup = true;
punch_hole:
@@ -591,11 +568,6 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
static void ploop_discard_index_pio_end(struct ploop *ploop, struct pio *pio)
{
del_cluster_lk(ploop, pio);
-
- WRITE_ONCE(ploop->pending_discard_cleanup, jiffies);
- /* Pairs with barrier in do_discard_cleanup() */
- smp_mb__before_atomic();
- atomic_dec(&ploop->nr_discard_bios);
}
static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
@@ -1472,7 +1444,7 @@ static int process_one_deferred_bio(struct ploop *ploop, struct pio *pio,
inc_nr_inflight(ploop, pio);
read_unlock_irq(&ploop->bat_rwlock);
- maybe_link_submitting_pio(ploop, pio, cluster);
+ link_submitting_pio(ploop, pio, cluster);
submit_rw_mapped(ploop, dst_cluster, pio);
out:
@@ -1556,25 +1528,6 @@ static int process_one_discard_pio(struct ploop *ploop, struct pio *pio,
return 0;
}
-static void do_discard_cleanup(struct ploop *ploop)
-{
- unsigned long cleanup_jiffies;
-
- if (ploop->force_rbtree_for_inflight &&
- !atomic_read(&ploop->nr_discard_bios)) {
- /* Pairs with barrier in ploop_discard_index_pio_end() */
- smp_rmb();
- cleanup_jiffies = READ_ONCE(ploop->pending_discard_cleanup);
-
- if (time_after(jiffies, cleanup_jiffies + CLEANUP_DELAY * HZ))
- ploop->force_rbtree_for_inflight = false;
- }
-}
-
-/*
- * This switches the device back in !force_rbtree_for_inflight mode
- * after cleanup timeout has expired.
- */
static void process_discard_pios(struct ploop *ploop, struct list_head *pios,
struct ploop_index_wb *piwb)
{
@@ -1584,11 +1537,6 @@ static void process_discard_pios(struct ploop *ploop, struct list_head *pios,
process_one_discard_pio(ploop, pio, piwb);
}
-static void check_services_timeout(struct ploop *ploop)
-{
- do_discard_cleanup(ploop);
-}
-
void do_ploop_work(struct work_struct *ws)
{
struct ploop *ploop = container_of(ws, struct ploop, worker);
@@ -1628,8 +1576,6 @@ void do_ploop_work(struct work_struct *ws)
ploop_reset_bat_update(&piwb);
}
- check_services_timeout(ploop);
-
current->flags = (current->flags & ~PF_IO_THREAD) | pf_io_thread;
}
@@ -1779,7 +1725,7 @@ static void handle_cleanup(struct ploop *ploop, struct pio *pio)
if (pio->wants_discard_index_cleanup)
ploop_discard_index_pio_end(ploop, pio);
- maybe_unlink_completed_pio(ploop, pio);
+ unlink_completed_pio(ploop, pio);
dec_nr_inflight(ploop, pio);
}
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index bbe3686e9f34..692d7c6a52d4 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -312,7 +312,6 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&ploop->discard_pios);
INIT_LIST_HEAD(&ploop->cluster_lk_list);
INIT_LIST_HEAD(&ploop->delta_cow_action_list);
- atomic_set(&ploop->nr_discard_bios, 0);
ploop->bat_entries = RB_ROOT;
INIT_WORK(&ploop->worker, do_ploop_work);
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 635c53d6993b..ecfc4dab81d3 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -158,12 +158,10 @@ struct ploop {
unsigned int tb_nr; /* tracking_bitmap size in bits */
unsigned int tb_cursor;
- bool force_rbtree_for_inflight;
/*
* Hash table to link non-exclusive submitted bios.
* This is needed for discard to check, nobody uses
- * the discarding cluster. Only made when the above
- * force_rbtree_for_inflight is enabled.
+ * the discarding cluster.
*/
struct hlist_head *inflight_pios;
/*
@@ -172,9 +170,6 @@ struct ploop {
*/
struct hlist_head *exclusive_pios;
- atomic_t nr_discard_bios;
- unsigned long pending_discard_cleanup;
-
struct workqueue_struct *wq;
struct work_struct worker;
struct work_struct fsync_worker;