[Devel] [PATCH vz9 v2 36/65] dm-ploop: convert bat_rwlock to bat_lock spinlock
Konstantin Khorenko
khorenko at virtuozzo.com
Wed Feb 12 12:33:29 MSK 2025
From: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
Prepare for threads: convert the rwlock to a spinlock.
This patch only converts the existing rwlock call sites;
the next patch takes the lock in the additional places where it is required.
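For illustration, a minimal sketch of the conversion pattern applied throughout
this patch (the structure and function names below are hypothetical stand-ins,
not the actual ploop code; it assumes the standard <linux/spinlock.h> API):

#include <linux/spinlock.h>
#include <linux/bitops.h>

struct my_dev {
	spinlock_t bat_lock;		/* was: rwlock_t bat_rwlock */
	unsigned long *holes_bitmap;
};

static void my_dev_init(struct my_dev *d)
{
	spin_lock_init(&d->bat_lock);	/* was: rwlock_init(&d->bat_rwlock) */
}

static void my_dev_set_hole(struct my_dev *d, unsigned int bit)
{
	unsigned long flags;

	/*
	 * Former read_lock_irqsave()/write_lock_irqsave() callers now all
	 * take the same exclusive spinlock; callers that used the
	 * non-saving _irq variants and switch to spin_lock_irqsave()
	 * must add a local 'flags' variable, as done in this patch.
	 */
	spin_lock_irqsave(&d->bat_lock, flags);
	__set_bit(bit, d->holes_bitmap);
	spin_unlock_irqrestore(&d->bat_lock, flags);
}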
https://virtuozzo.atlassian.net/browse/VSTOR-91821
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
======
Patchset description:
ploop: optimisations and scaling
Ploop processes requests in different threads in parallel
where possible, which results in a significant improvement in
performance and makes further optimisations possible.
Known bugs:
- delayed metadata writeback is not working and is missing error handling;
  a patch in the series disables it until fixed
- fast path is not working (causes RCU lockups); a patch in the series disables it
Further improvements:
- optimize md pages lookups
Alexander Atanasov (50):
dm-ploop: md_pages map all pages at creation time
dm-ploop: Use READ_ONCE/WRITE_ONCE to access md page data
dm-ploop: fsync after all pios are sent
dm-ploop: move md status to use proper bitops
dm-ploop: convert wait_list and wb_batch_llist to use lockless lists
dm-ploop: convert enospc handling to use lockless lists
dm-ploop: convert suspended_pios list to use lockless list
dm-ploop: convert the rest of the lists to use llist variant
dm-ploop: combine processing of pios thru prepare list and remove
fsync worker
dm-ploop: move from wq to kthread
dm-ploop: move preparations of pios into the caller from worker
dm-ploop: fast path execution for reads
dm-ploop: do not use a wrapper for set_bit to make a page writeback
dm-ploop: BAT use only one list for writeback
dm-ploop: make md writeback timeout to be per page
dm-ploop: add interface to disable bat writeback delay
dm-ploop: convert wb_batch_list to lockless variant
dm-ploop: convert high_prio to status
dm-ploop: split cow processing into two functions
dm-ploop: convert md page rw lock to spin lock
dm-ploop: convert bat_rwlock to bat_lock spinlock
dm-ploop: prepare bat updates under bat_lock
dm-ploop: make ploop_bat_write_complete ready for parallel pio
completion
dm-ploop: make ploop_submit_metadata_writeback return number of
requests sent
dm-ploop: introduce pio runner threads
dm-ploop: add pio list ids to be used when passing pios to runners
dm-ploop: process pios via runners
dm-ploop: disable metadata writeback delay
dm-ploop: disable fast path
dm-ploop: use lockless lists for chained cow updates list
dm-ploop: use lockless lists for data ready pios
dm-ploop: give runner threads better name
dm-ploop: resize operation - add holes bitmap locking
dm-ploop: remove unnecessary operations
dm-ploop: use filp per thread
dm-ploop: catch if we try to advance pio past bio end
dm-ploop: support REQ_FUA for data pios
dm-ploop: proplerly access nr_bat_entries
dm-ploop: fix locking and improve error handling when submitting pios
dm-ploop: fix how ENOTBLK is handled
dm-ploop: sync when suspended or stopping
dm-ploop: rework bat completion logic
dm-ploop: rework logic in pio processing
dm-ploop: end fsync pios in parallel
dm-ploop: make filespace preallocations async
dm-ploop: resubmit enospc pios from dispatcher thread
dm-ploop: dm-ploop: simplify discard completion
dm-ploop: use GFP_ATOMIC instead of GFP_NOIO
dm-ploop: fix locks used in mixed context
dm-ploop: fix how current flags are managed inside threads
Andrey Zhadchenko (13):
dm-ploop: do not flush after metadata writes
dm-ploop: set IOCB_DSYNC on all FUA requests
dm-ploop: remove extra ploop_cluster_is_in_top_delta()
dm-ploop: introduce per-md page locking
dm-ploop: reduce BAT accesses on discard completion
dm-ploop: simplify llseek
dm-ploop: speed up ploop_prepare_bat_update()
dm-ploop: make new allocations immediately visible in BAT
dm-ploop: drop ploop_cluster_is_in_top_delta()
dm-ploop: do not wait for BAT update for non-FUA requests
dm-ploop: add delay for metadata writeback
dm-ploop: submit all postponed metadata on REQ_OP_FLUSH
dm-ploop: handle REQ_PREFLUSH
Feature: dm-ploop: ploop target driver
---
drivers/md/dm-ploop-cmd.c | 8 ++++----
drivers/md/dm-ploop-map.c | 22 ++++++++++++----------
drivers/md/dm-ploop-target.c | 2 +-
drivers/md/dm-ploop.h | 2 +-
4 files changed, 18 insertions(+), 16 deletions(-)
diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index fa9f8c9d8d40..dc0be65261f1 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -34,7 +34,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
return;
cmd->resize.stage++;
- write_lock_irqsave(&ploop->bat_rwlock, flags);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
/* Copy and swap holes_bitmap */
size = DIV_ROUND_UP(ploop->hb_nr, 8);
memcpy(cmd->resize.holes_bitmap, ploop->holes_bitmap, size);
@@ -59,7 +59,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
}
spin_unlock(&md->md_lock);
}
- write_unlock_irqrestore(&ploop->bat_rwlock, flags);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
}
static int ploop_wait_for_completion_maybe_killable(struct completion *comp,
@@ -451,7 +451,7 @@ static int ploop_process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
/* Update header metadata */
ret = ploop_grow_update_header(ploop, cmd);
out:
- write_lock_irq(&ploop->bat_rwlock);
+ spin_lock_irq(&ploop->bat_lock);
if (ret) {
/* Cleanup: mark new BAT overages as free clusters */
dst_clu = cmd->resize.dst_clu - 1;
@@ -465,7 +465,7 @@ static int ploop_process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
ploop_add_md_pages(ploop, &cmd->resize.md_pages_root);
swap(ploop->nr_bat_entries, cmd->resize.nr_bat_entries);
}
- write_unlock_irq(&ploop->bat_rwlock);
+ spin_unlock_irq(&ploop->bat_lock);
return ret;
}
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index bf7a24e074d0..1032b0e74d39 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -557,13 +557,13 @@ static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
unsigned long flags;
bool new = false;
- write_lock_irqsave(&ploop->bat_rwlock, flags);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
if (!test_and_set_bit(MD_DIRTY, &md->status)) {
llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
new = true;
}
- write_unlock_irqrestore(&ploop->bat_rwlock, flags);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
md->dirty_timeout = jiffies + ploop->md_submit_delay_ms*HZ/1000;
return new;
@@ -573,7 +573,7 @@ static void ploop_md_up_prio(struct ploop *ploop, struct md_page *md)
{
unsigned long flags;
- write_lock_irqsave(&ploop->bat_rwlock, flags);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
if (test_bit(MD_WRITEBACK, &md->status))
goto out;
if (test_and_set_bit(MD_HIGHPRIO, &md->status))
@@ -581,7 +581,7 @@ static void ploop_md_up_prio(struct ploop *ploop, struct md_page *md)
WARN_ON_ONCE(!test_bit(MD_DIRTY, &md->status));
out:
- write_unlock_irqrestore(&ploop->bat_rwlock, flags);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
}
void ploop_disable_writeback_delay(struct ploop *ploop)
@@ -732,9 +732,9 @@ static void ploop_complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
ploop_del_cluster_lk(ploop, cow_pio);
if (dst_clu != BAT_ENTRY_NONE && bi_status != BLK_STS_OK) {
- read_lock_irqsave(&ploop->bat_rwlock, flags);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
ploop_hole_set_bit(dst_clu, ploop);
- read_unlock_irqrestore(&ploop->bat_rwlock, flags);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
}
ploop_queue_or_fail(ploop, blk_status_to_errno(bi_status), cow_pio);
@@ -1842,6 +1842,7 @@ static void ploop_process_resubmit_pios(struct ploop *ploop,
static void ploop_submit_metadata_writeback(struct ploop *ploop)
{
+ unsigned long flags;
LIST_HEAD(ll_skipped);
unsigned long timeout = jiffies;
struct md_page *md;
@@ -1849,9 +1850,9 @@ static void ploop_submit_metadata_writeback(struct ploop *ploop)
struct llist_node *ll_wb_batch;
/* Lock here to protect against md_inflight counting */
- write_lock_irq(&ploop->bat_rwlock);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
ll_wb_batch = llist_del_all(&ploop->wb_batch_llist);
- write_unlock_irq(&ploop->bat_rwlock);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
/*
* Pages are set dirty so no one must touch lists
* if new md entries are dirtied they are added at the start of the list
@@ -2005,6 +2006,7 @@ static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
struct md_page *md;
int md_inflight = 0;
struct llist_node *pos, *t;
+ unsigned long flags;
if (pio->bi_op & REQ_PREFLUSH && (pio->bi_op & REQ_OP_MASK) != REQ_OP_FLUSH) {
flush_pio = ploop_alloc_pio(ploop, GFP_NOIO);
@@ -2021,7 +2023,7 @@ static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
flush_pio->free_on_endio = true;
}
- write_lock_irq(&ploop->bat_rwlock);
+ spin_lock_irqsave(&ploop->bat_lock, flags);
llist_for_each_safe(pos, t, ploop->wb_batch_llist.first) {
md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
md_inflight++;
@@ -2030,7 +2032,7 @@ static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
}
atomic_set(&flush_pio->md_inflight, md_inflight);
- write_unlock_irq(&ploop->bat_rwlock);
+ spin_unlock_irqrestore(&ploop->bat_lock, flags);
return md_inflight;
}
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index 9a4a4dc57bb5..dc63c18cece8 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -405,7 +405,7 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err;
}
- rwlock_init(&ploop->bat_rwlock);
+ spin_lock_init(&ploop->bat_lock);
spin_lock_init(&ploop->err_status_lock);
init_rwsem(&ploop->ctl_rwsem);
init_waitqueue_head(&ploop->service_wq);
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 3dcf37c51b2a..10c8cf2e154a 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -174,7 +174,7 @@ struct ploop {
*/
void *holes_bitmap; /* Clearing a bit occurs from kwork only */
u32 hb_nr; /* holes_bitmap size in bits */
- rwlock_t bat_rwlock;
+ spinlock_t bat_lock;
struct llist_head wb_batch_llist;
--
2.43.5