[Devel] [RFC PATCH vz9 v3 11/11] ploop: combine processing of pios thru prepare list and remove fsync worker
Alexander Atanasov
alexander.atanasov at virtuozzo.com
Mon Oct 21 13:13:58 MSK 2024
Currently, data pios and flushes are separated into different lists before
being handed to the workqueue. This can lead to flushes executing before the
relevant data pios, and it is not possible to recover that dependency in the worker.
So put both data and flush pios into the prepare list. This way the worker can
get a single list of pios and maintain ordering while executing them.
Now we can remove the fsync_worker, and the worker can queue further
work back onto itself without problems.
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
---
drivers/md/dm-ploop-map.c | 62 ++++++++++++------------------------
drivers/md/dm-ploop-target.c | 1 -
drivers/md/dm-ploop.h | 2 --
3 files changed, 20 insertions(+), 45 deletions(-)
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 6b929c478dd0..83b4b8148b10 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -337,8 +337,7 @@ static int ploop_split_pio_to_list(struct ploop *ploop, struct pio *pio,
}
ALLOW_ERROR_INJECTION(ploop_split_pio_to_list, ERRNO);
-static void ploop_dispatch_pio(struct ploop *ploop, struct pio *pio,
- bool *is_data, bool *is_flush)
+static void ploop_dispatch_pio(struct ploop *ploop, struct pio *pio)
{
struct llist_head *list = (struct llist_head *)&ploop->pios[pio->queue_list_id];
@@ -346,9 +345,7 @@ static void ploop_dispatch_pio(struct ploop *ploop, struct pio *pio,
WARN_ON_ONCE(pio->queue_list_id >= PLOOP_LIST_COUNT);
if (pio->queue_list_id == PLOOP_LIST_FLUSH)
- *is_flush = true;
- else
- *is_data = true;
+ list = (struct llist_head *)&ploop->pios[PLOOP_LIST_PREPARE];
llist_add((struct llist_node *)(&pio->list), list);
}
@@ -356,19 +353,14 @@ static void ploop_dispatch_pio(struct ploop *ploop, struct pio *pio,
void ploop_dispatch_pios(struct ploop *ploop, struct pio *pio,
struct list_head *pio_list)
{
- bool is_data = false, is_flush = false;
-
if (pio)
- ploop_dispatch_pio(ploop, pio, &is_data, &is_flush);
+ ploop_dispatch_pio(ploop, pio);
if (pio_list) {
while ((pio = ploop_pio_list_pop(pio_list)) != NULL)
- ploop_dispatch_pio(ploop, pio, &is_data, &is_flush);
+ ploop_dispatch_pio(ploop, pio);
}
- if (is_data)
- queue_work(ploop->wq, &ploop->worker);
- else if (is_flush)
- queue_work(ploop->wq, &ploop->fsync_worker);
+ queue_work(ploop->wq, &ploop->worker);
}
static bool ploop_delay_if_md_busy(struct ploop *ploop, struct md_page *md,
@@ -806,10 +798,9 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
wait_llist_pending = llist_del_all(&md->wait_llist);
if (wait_llist_pending) {
- wait_llist_pending = llist_reverse_order(wait_llist_pending);
llist_for_each_safe(pos, t, wait_llist_pending) {
pio = list_entry((struct list_head *)pos, typeof(*pio), list);
- list_add_tail(&pio->list, &list);
+ list_add(&pio->list, &list);
}
}
@@ -1682,7 +1673,10 @@ static void ploop_prepare_embedded_pios(struct ploop *ploop,
llist_for_each_safe(pos, t, pios) {
pio = list_entry((struct list_head *)pos, typeof(*pio), list);
INIT_LIST_HEAD(&pio->list); /* until type is changed */
- ploop_prepare_one_embedded_pio(ploop, pio, deferred_pios);
+ if (pio->queue_list_id != PLOOP_LIST_FLUSH)
+ ploop_prepare_one_embedded_pio(ploop, pio, deferred_pios);
+ else
+ llist_add((struct llist_node *)(&pio->list), &ploop->pios[PLOOP_LIST_FLUSH]);
}
}
@@ -1776,6 +1770,7 @@ static void ploop_submit_metadata_writeback(struct ploop *ploop)
struct md_page *md;
struct md_page *t;
struct llist_node *wbl;
+
wbl = llist_del_all(&ploop->wb_batch_llist);
if (!wbl)
return;
@@ -1799,6 +1794,9 @@ static void process_ploop_fsync_work(struct ploop *ploop)
llflush_pios = llist_del_all(&ploop->pios[PLOOP_LIST_FLUSH]);
+ if (!llflush_pios)
+ return;
+
file = ploop_top_delta(ploop)->file;
/* All flushes are done as one */
ret = vfs_fsync(file, 0);
@@ -1824,32 +1822,25 @@ void do_ploop_work(struct work_struct *ws)
struct llist_node *lldiscard_pios;
struct llist_node *llcow_pios;
struct llist_node *llresubmit;
- bool do_fsync = false;
unsigned int old_flags = current->flags;
current->flags |= PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
- spin_lock_irq(&ploop->deferred_lock);
llembedded_pios = llist_del_all(&ploop->pios[PLOOP_LIST_PREPARE]);
lldeferred_pios = llist_del_all(&ploop->pios[PLOOP_LIST_DEFERRED]);
lldiscard_pios = llist_del_all(&ploop->pios[PLOOP_LIST_DISCARD]);
llcow_pios = llist_del_all(&ploop->pios[PLOOP_LIST_COW]);
llresubmit = llist_del_all(&ploop->llresubmit_pios);
- if (!llist_empty(&ploop->pios[PLOOP_LIST_FLUSH]))
- do_fsync = true;
-
- spin_unlock_irq(&ploop->deferred_lock);
-
/* add old deferred to the list */
if (lldeferred_pios) {
struct llist_node *pos, *t;
struct pio *pio;
- llist_for_each_safe(pos, t, llist_reverse_order(lldeferred_pios)) {
+ llist_for_each_safe(pos, t, lldeferred_pios) {
pio = list_entry((struct list_head *)pos, typeof(*pio), list);
INIT_LIST_HEAD(&pio->list);
- list_add_tail(&pio->list, &deferred_pios);
+ list_add(&pio->list, &deferred_pios);
}
}
@@ -1858,7 +1849,6 @@ void do_ploop_work(struct work_struct *ws)
if (llresubmit)
ploop_process_resubmit_pios(ploop, llist_reverse_order(llresubmit));
-
ploop_process_deferred_pios(ploop, &deferred_pios);
if (lldiscard_pios)
@@ -1869,34 +1859,22 @@ void do_ploop_work(struct work_struct *ws)
ploop_submit_metadata_writeback(ploop);
- current->flags = old_flags;
-
- if (do_fsync)
+ if (!llist_empty(&ploop->pios[PLOOP_LIST_FLUSH]))
process_ploop_fsync_work(ploop);
-}
-
-void do_ploop_fsync_work(struct work_struct *ws)
-{
- struct ploop *ploop = container_of(ws, struct ploop, fsync_worker);
-
- process_ploop_fsync_work(ploop);
+ current->flags = old_flags;
}
static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
{
struct ploop_rq *prq = pio->endio_cb_data;
struct request *rq = prq->rq;
- struct work_struct *worker;
- unsigned long flags;
bool queue = true;
if (blk_rq_bytes(rq)) {
pio->queue_list_id = PLOOP_LIST_PREPARE;
- worker = &ploop->worker;
} else {
WARN_ON_ONCE(pio->bi_op != REQ_OP_FLUSH);
pio->queue_list_id = PLOOP_LIST_FLUSH;
- worker = &ploop->fsync_worker;
}
if (unlikely(ploop->stop_submitting_pios)) {
@@ -1906,11 +1884,11 @@ static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
}
ploop_inc_nr_inflight(ploop, pio);
- llist_add((struct llist_node *)(&pio->list), &ploop->pios[pio->queue_list_id]);
+ llist_add((struct llist_node *)(&pio->list), &ploop->pios[PLOOP_LIST_PREPARE]);
out:
if (queue)
- queue_work(ploop->wq, worker);
+ queue_work(ploop->wq, &ploop->worker);
}
void ploop_submit_embedded_pios(struct ploop *ploop, struct list_head *list)
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index f12c6912f8d0..ea9af6b6abe9 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -384,7 +384,6 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
timer_setup(&ploop->enospc_timer, ploop_enospc_timer, 0);
INIT_WORK(&ploop->worker, do_ploop_work);
- INIT_WORK(&ploop->fsync_worker, do_ploop_fsync_work);
INIT_WORK(&ploop->event_work, do_ploop_event_work);
init_completion(&ploop->inflight_bios_ref_comp);
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 66c7ce20dd60..bd4906e4c2b5 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -182,7 +182,6 @@ struct ploop {
struct workqueue_struct *wq;
struct work_struct worker;
- struct work_struct fsync_worker;
struct work_struct event_work;
struct completion inflight_bios_ref_comp;
@@ -569,7 +568,6 @@ extern void ploop_submit_embedded_pios(struct ploop *ploop,
extern void ploop_dispatch_pios(struct ploop *ploop, struct pio *pio,
struct list_head *pio_list);
extern void do_ploop_work(struct work_struct *ws);
-extern void do_ploop_fsync_work(struct work_struct *ws);
extern void do_ploop_event_work(struct work_struct *work);
extern int ploop_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context,
--
2.43.0
More information about the Devel
mailing list