[Devel] [PATCH RHEL9 COMMIT] dm-ploop: convert wb_batch_list to lockless variant
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Jan 27 16:12:43 MSK 2025
The commit is pushed to "branch-rh9-5.14.0-427.44.1.vz9.80.x-ovz" and will appear at git at bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.44.1.vz9.80.6
------>
commit dfdcc0b79d4ba8c90fc310bea0831a9bb41c21d6
Author: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
Date: Fri Jan 24 17:36:04 2025 +0200
dm-ploop: convert wb_batch_list to lockless variant
Merging required backing out this change, so do it again.
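For reference, a minimal sketch of the lockless pattern this patch moves to (hypothetical example_* names, not the actual driver code; the real driver keeps the existing struct list_head wb_link field and casts it, and still takes bat_rwlock around the detach to protect md_inflight counting): producers publish dirty md pages with llist_add(), and the writeback worker detaches the whole batch atomically with llist_del_all() and walks it with llist_for_each_safe().

#include <linux/llist.h>

struct example_md {
	struct llist_node wb_link;
	/* ... the rest of the md page state ... */
};

static LLIST_HEAD(example_wb_batch);

/* Producer side: safe to call concurrently from several threads. */
static void example_mark_dirty(struct example_md *md)
{
	llist_add(&md->wb_link, &example_wb_batch);
}

/* Consumer side: detach the current batch atomically, then walk it. */
static void example_writeback(void)
{
	struct llist_node *batch, *pos, *t;

	batch = llist_del_all(&example_wb_batch);
	llist_for_each_safe(pos, t, batch) {
		struct example_md *md =
			llist_entry(pos, struct example_md, wb_link);
		/* submit writeback for md, or llist_add() it back to retry */
	}
}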
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
======
Patchset description:
ploop: optimisations and scaling
Ploop processes requests in different threads in parallel
where possible, which results in a significant improvement in
performance and makes further optimisations possible.
Known bugs:
- delayed metadata writeback is not working and is missing error handling;
  a patch disables it until it is fixed
- fast path is not working (causes RCU lockups); a patch disables it
Further improvements:
- optimize md pages lookups
Alexander Atanasov (50):
dm-ploop: md_pages map all pages at creation time
dm-ploop: Use READ_ONCE/WRITE_ONCE to access md page data
dm-ploop: fsync after all pios are sent
dm-ploop: move md status to use proper bitops
dm-ploop: convert wait_list and wb_batch_llist to use lockless lists
dm-ploop: convert enospc handling to use lockless lists
dm-ploop: convert suspended_pios list to use lockless list
dm-ploop: convert the rest of the lists to use llist variant
dm-ploop: combine processing of pios thru prepare list and remove
fsync worker
dm-ploop: move from wq to kthread
dm-ploop: move preparations of pios into the caller from worker
dm-ploop: fast path execution for reads
dm-ploop: do not use a wrapper for set_bit to make a page writeback
dm-ploop: BAT use only one list for writeback
dm-ploop: make md writeback timeout to be per page
dm-ploop: add interface to disable bat writeback delay
dm-ploop: convert wb_batch_list to lockless variant
dm-ploop: convert high_prio to status
dm-ploop: split cow processing into two functions
dm-ploop: convert md page rw lock to spin lock
dm-ploop: convert bat_rwlock to bat_lock spinlock
dm-ploop: prepare bat updates under bat_lock
dm-ploop: make ploop_bat_write_complete ready for parallel pio
completion
dm-ploop: make ploop_submit_metadata_writeback return number of
requests sent
dm-ploop: introduce pio runner threads
dm-ploop: add pio list ids to be used when passing pios to runners
dm-ploop: process pios via runners
dm-ploop: disable metadata writeback delay
dm-ploop: disable fast path
dm-ploop: use lockless lists for chained cow updates list
dm-ploop: use lockless lists for data ready pios
dm-ploop: give runner threads better name
dm-ploop: resize operation - add holes bitmap locking
dm-ploop: remove unnecessary operations
dm-ploop: use filp per thread
dm-ploop: catch if we try to advance pio past bio end
dm-ploop: support REQ_FUA for data pios
dm-ploop: proplerly access nr_bat_entries
dm-ploop: fix locking and improve error handling when submitting pios
dm-ploop: fix how ENOTBLK is handled
dm-ploop: sync when suspended or stopping
dm-ploop: rework bat completion logic
dm-ploop: rework logic in pio processing
dm-ploop: end fsync pios in parallel
dm-ploop: make filespace preallocations async
dm-ploop: resubmit enospc pios from dispatcher thread
dm-ploop: dm-ploop: simplify discard completion
dm-ploop: use GFP_ATOMIC instead of GFP_NOIO
dm-ploop: fix locks used in mixed context
dm-ploop: fix how current flags are managed inside threads
Andrey Zhadchenko (13):
dm-ploop: do not flush after metadata writes
dm-ploop: set IOCB_DSYNC on all FUA requests
dm-ploop: remove extra ploop_cluster_is_in_top_delta()
dm-ploop: introduce per-md page locking
dm-ploop: reduce BAT accesses on discard completion
dm-ploop: simplify llseek
dm-ploop: speed up ploop_prepare_bat_update()
dm-ploop: make new allocations immediately visible in BAT
dm-ploop: drop ploop_cluster_is_in_top_delta()
dm-ploop: do not wait for BAT update for non-FUA requests
dm-ploop: add delay for metadata writeback
dm-ploop: submit all postponed metadata on REQ_OP_FLUSH
dm-ploop: handle REQ_PREFLUSH
Feature: dm-ploop: ploop target driver
---
drivers/md/dm-ploop-map.c | 53 ++++++++++++++++++++++----------------------
drivers/md/dm-ploop-target.c | 1 -
drivers/md/dm-ploop.h | 1 -
3 files changed, 27 insertions(+), 28 deletions(-)
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index cc75b99908a2..bd80dac34073 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -560,7 +560,7 @@ static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
write_lock_irqsave(&ploop->bat_rwlock, flags);
WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
if (!test_and_set_bit(MD_DIRTY, &md->status)) {
- list_add(&md->wb_link, &ploop->wb_batch_list);
+ llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
new = true;
}
write_unlock_irqrestore(&ploop->bat_rwlock, flags);
@@ -1843,36 +1843,36 @@ static void ploop_process_resubmit_pios(struct ploop *ploop,
static void ploop_submit_metadata_writeback(struct ploop *ploop)
{
LIST_HEAD(ll_skipped);
- struct md_page *md, *mtmp;
unsigned long timeout = jiffies;
+ struct md_page *md;
+ struct llist_node *pos, *t;
+ struct llist_node *ll_wb_batch;
+ /* Lock here to protect against md_inflight counting */
+ write_lock_irq(&ploop->bat_rwlock);
+ ll_wb_batch = llist_del_all(&ploop->wb_batch_llist);
+ write_unlock_irq(&ploop->bat_rwlock);
/*
* Pages are set dirty so no one must touch lists
* if new md entries are dirtied they are added at the start of the list
*/
- list_for_each_entry_safe(md, mtmp, &ploop->wb_batch_list, wb_link) {
- write_lock_irq(&ploop->bat_rwlock);
- list_del_init(&md->wb_link);
+ llist_for_each_safe(pos, t, ll_wb_batch) {
+ md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
+ INIT_LIST_HEAD(&md->wb_link);
if (!llist_empty(&md->wait_llist) || md->high_prio ||
time_before(md->dirty_timeout, timeout) ||
ploop->force_md_writeback) {
/* L1L2 mustn't be redirtyed, when wb in-flight! */
WARN_ON_ONCE(!test_bit(MD_DIRTY, &md->status));
WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
+ md->high_prio = false;
set_bit(MD_WRITEBACK, &md->status);
clear_bit(MD_DIRTY, &md->status);
- md->high_prio = false;
- write_unlock_irq(&ploop->bat_rwlock);
ploop_index_wb_submit(ploop, md->piwb);
} else {
- list_add_tail(&md->wb_link, &ll_skipped);
- write_unlock_irq(&ploop->bat_rwlock);
+ llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
}
}
- write_lock_irq(&ploop->bat_rwlock);
- list_splice(&ll_skipped, &ploop->wb_batch_list);
- write_unlock_irq(&ploop->bat_rwlock);
-
}
static void process_ploop_fsync_work(struct ploop *ploop, struct llist_node *llflush_pios)
@@ -1998,18 +1998,19 @@ static void ploop_preflush_endio(struct pio *pio, void *orig_pio_ptr,
}
}
-static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
+static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
{
struct pio *flush_pio = pio;
- struct md_page *md, *n;
+ struct md_page *md;
int md_inflight = 0;
+ struct llist_node *pos, *t;
if (pio->bi_op & REQ_PREFLUSH && (pio->bi_op & REQ_OP_MASK) != REQ_OP_FLUSH) {
flush_pio = ploop_alloc_pio(ploop, GFP_NOIO);
if (!flush_pio) {
pio->bi_status = BLK_STS_RESOURCE;
ploop_pio_endio(pio);
- return;
+ return -1;
}
ploop_init_pio(ploop, REQ_OP_FLUSH, flush_pio);
@@ -2020,21 +2021,17 @@ static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
}
write_lock_irq(&ploop->bat_rwlock);
-
- list_for_each_entry_safe(md, n, &ploop->wb_batch_list, wb_link) {
+ llist_for_each_safe(pos, t, ploop->wb_batch_llist.first) {
+ md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
md_inflight++;
md->piwb->flush_pio = flush_pio;
md->high_prio = true;
}
atomic_set(&flush_pio->md_inflight, md_inflight);
-
write_unlock_irq(&ploop->bat_rwlock);
- if (md_inflight)
- ploop_schedule_work(ploop);
- else
- ploop_dispatch_pios(ploop, flush_pio, NULL);
+ return md_inflight;
}
static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
@@ -2060,8 +2057,12 @@ static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
ploop_inc_nr_inflight(ploop, pio);
if ((pio->bi_op & REQ_OP_MASK) == REQ_OP_FLUSH || pio->bi_op & REQ_PREFLUSH) {
- ploop_prepare_flush(ploop, pio);
- return;
+ ret = ploop_prepare_flush(ploop, pio);
+ if (ret < 0)
+ return;
+ if (ret > 0)
+ goto out;
+ /* Will add to prepare list and schedule work */
}
if (pio->queue_list_id == PLOOP_LIST_FLUSH) {
@@ -2103,7 +2104,7 @@ static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
&ploop->pios[PLOOP_LIST_DEFERRED]);
}
}
-
+out:
ploop_schedule_work(ploop);
}
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index 87827b90aeef..9a4a4dc57bb5 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -422,7 +422,6 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
INIT_LIST_HEAD(&ploop->cluster_lk_list);
init_llist_head(&ploop->wb_batch_llist);
- INIT_LIST_HEAD(&ploop->wb_batch_list);
ploop->last_md_submit = 0;
ploop->md_submit_delay_ms = PLOOP_DEFAULT_METADATA_SUBMIT_DELAY;
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index b0cf01bb3003..91176b3d8b35 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -178,7 +178,6 @@ struct ploop {
rwlock_t bat_rwlock;
struct llist_head wb_batch_llist;
- struct list_head wb_batch_list;
ktime_t last_md_submit;
bool force_md_writeback;
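A note on the flush-path hunks above: ploop_prepare_flush() no longer schedules the work itself. It now returns the number of in-flight metadata writebacks, or a negative value when the flush pio cannot be allocated (in which case the original pio has already been completed with BLK_STS_RESOURCE). The caller, ploop_submit_embedded_pio(), returns early on error, jumps straight to scheduling the worker when writebacks are pending (the flush pio's md_inflight counter is set so that their completions can finish it), and otherwise lets the pio continue through the normal queueing path before the worker is kicked.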