[Devel] [PATCH RHEL9 COMMIT] dm-ploop: end fsync pios in parallel

Konstantin Khorenko khorenko at virtuozzo.com
Mon Jan 27 16:12:54 MSK 2025


The commit is pushed to "branch-rh9-5.14.0-427.44.1.vz9.80.x-ovz" and will appear at git at bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.44.1.vz9.80.6
------>
commit f68f8ecb535c9b780e85d1f9c6f0e6b74bb38c0e
Author: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
Date:   Fri Jan 24 17:36:31 2025 +0200

    dm-ploop: end fsync pios in parallel
    
    Send all pios attached to a sync operation to the runner
    threads so that endio is called in parallel.
    
    https://virtuozzo.atlassian.net/browse/VSTOR-91821
    Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
    
    ======
    Patchset description:
    ploop: optimisations and scaling
    
    Ploop processes requests in different threads in parallel
    where possible, which results in a significant improvement in
    performance and makes further optimisations possible.
    
    Known bugs:
      - delayed metadata writeback is not working and is missing error handling
         - patch to disable it until fixed
      - fast path is not working - causes rcu lockups - patch to disable it
    
    Further improvements:
      - optimize md pages lookups
    
    Alexander Atanasov (50):
      dm-ploop: md_pages map all pages at creation time
      dm-ploop: Use READ_ONCE/WRITE_ONCE to access md page data
      dm-ploop: fsync after all pios are sent
      dm-ploop: move md status to use proper bitops
      dm-ploop: convert wait_list and wb_batch_llist to use lockless lists
      dm-ploop: convert enospc handling to use lockless lists
      dm-ploop: convert suspended_pios list to use lockless list
      dm-ploop: convert the rest of the lists to use llist variant
      dm-ploop: combine processing of pios thru prepare list and remove
        fsync worker
      dm-ploop: move from wq to kthread
      dm-ploop: move preparations of pios into the caller from worker
      dm-ploop: fast path execution for reads
      dm-ploop: do not use a wrapper for set_bit to make a page writeback
      dm-ploop: BAT use only one list for writeback
      dm-ploop: make md writeback timeout to be per page
      dm-ploop: add interface to disable bat writeback delay
      dm-ploop: convert wb_batch_list to lockless variant
      dm-ploop: convert high_prio to status
      dm-ploop: split cow processing into two functions
      dm-ploop: convert md page rw lock to spin lock
      dm-ploop: convert bat_rwlock to bat_lock spinlock
      dm-ploop: prepare bat updates under bat_lock
      dm-ploop: make ploop_bat_write_complete ready for parallel pio
        completion
      dm-ploop: make ploop_submit_metadata_writeback return number of
        requests sent
      dm-ploop: introduce pio runner threads
      dm-ploop: add pio list ids to be used when passing pios to runners
      dm-ploop: process pios via runners
      dm-ploop: disable metadata writeback delay
      dm-ploop: disable fast path
      dm-ploop: use lockless lists for chained cow updates list
      dm-ploop: use lockless lists for data ready pios
      dm-ploop: give runner threads better name
      dm-ploop: resize operation - add holes bitmap locking
      dm-ploop: remove unnecessary operations
      dm-ploop: use filp per thread
      dm-ploop: catch if we try to advance pio past bio end
      dm-ploop: support REQ_FUA for data pios
      dm-ploop: proplerly access nr_bat_entries
      dm-ploop: fix locking and improve error handling when submitting pios
      dm-ploop: fix how ENOTBLK is handled
      dm-ploop: sync when suspended or stopping
      dm-ploop: rework bat completion logic
      dm-ploop: rework logic in pio processing
      dm-ploop: end fsync pios in parallel
      dm-ploop: make filespace preallocations async
      dm-ploop: resubmit enospc pios from dispatcher thread
      dm-ploop: dm-ploop: simplify discard completion
      dm-ploop: use GFP_ATOMIC instead of GFP_NOIO
      dm-ploop: fix locks used in mixed context
      dm-ploop: fix how current flags are managed inside threads
    
    Andrey Zhadchenko (13):
      dm-ploop: do not flush after metadata writes
      dm-ploop: set IOCB_DSYNC on all FUA requests
      dm-ploop: remove extra ploop_cluster_is_in_top_delta()
      dm-ploop: introduce per-md page locking
      dm-ploop: reduce BAT accesses on discard completion
      dm-ploop: simplify llseek
      dm-ploop: speed up ploop_prepare_bat_update()
      dm-ploop: make new allocations immediately visible in BAT
      dm-ploop: drop ploop_cluster_is_in_top_delta()
      dm-ploop: do not wait for BAT update for non-FUA requests
      dm-ploop: add delay for metadata writeback
      dm-ploop: submit all postponed metadata on REQ_OP_FLUSH
      dm-ploop: handle REQ_PREFLUSH
    
    Feature: dm-ploop: ploop target driver
---
 drivers/md/dm-ploop-map.c    | 18 ++++++++++++------
 drivers/md/dm-ploop-target.c |  1 +
 drivers/md/dm-ploop.h        |  2 ++
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index f9904e708c35..cb25255e5bf4 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -2006,7 +2006,10 @@ static int process_ploop_fsync_work(struct ploop *ploop, struct llist_node *llfl
 			if (static_branch_unlikely(&ploop_standby_check))
 				ploop_check_standby_mode(ploop, ret);
 		}
-		ploop_pio_endio(pio);
+		pio->queue_list_id = PLOOP_LIST_FLUSHPIO;
+		atomic_inc(&ploop->kt_worker->fsync_pios);
+		ploop_runners_add_work(ploop, pio);
+		npios++;
 	}
 	return npios;
 }
@@ -2105,14 +2108,12 @@ void do_ploop_run_work(struct ploop *ploop)
 		/* Now process fsync pios after we have done all other */
 		npios = process_ploop_fsync_work(ploop, llflush_pios);
 		/* Since dispatcher is single thread no other work can be queued */
-#ifdef USE_RUNNERS__NOT_READY
 		if (npios) {
 			current->flags = old_flags;
 			wait_event_interruptible(ploop->dispatcher_wq_fsync,
-						 atomic_read(&wrkr->fsync_pios) != 0);
+					!atomic_read(&ploop->kt_worker->fsync_pios));
 			current->flags |= PF_IO_THREAD|PF_LOCAL_THROTTLE|PF_MEMALLOC_NOIO;
 		}
-#endif
 	}
 
 	current->flags = old_flags;
@@ -2134,6 +2135,7 @@ int ploop_pio_runner(void *data)
 	struct llist_node *pos, *t;
 	unsigned int old_flags = current->flags;
 	int did_process_pios = 0;
+	int did_process_fsync = 0;
 
 	for (;;) {
 		current->flags = old_flags;
@@ -2146,6 +2148,10 @@ int ploop_pio_runner(void *data)
 				did_process_pios = 0;
 				wake_up_interruptible(&ploop->dispatcher_wq_data);
 			}
+			if (did_process_fsync) {
+				did_process_fsync = 0;
+				wake_up_interruptible(&ploop->dispatcher_wq_fsync);
+			}
 			/* Only stop when there is no more pios */
 			if (kthread_should_stop()) {
 				__set_current_state(TASK_RUNNING);
@@ -2170,9 +2176,9 @@ int ploop_pio_runner(void *data)
 				WARN_ON_ONCE(1);	/* We must not see prepares here */
 				break;
 			case PLOOP_LIST_FLUSHPIO:
-				// fsync pios can come here for endio
-				// XXX: make it a FSYNC list
+				did_process_fsync++;
 				ploop_pio_endio(pio);
+				atomic_dec(&ploop->kt_worker->fsync_pios);
 				break;
 			case PLOOP_LIST_DEFERRED:
 				ploop_process_one_deferred_bio(ploop, pio);
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index 8b0d2b7ae85b..61190f2f7eae 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -558,6 +558,7 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 
 	init_waitqueue_head(&ploop->dispatcher_wq_data);
+	init_waitqueue_head(&ploop->dispatcher_wq_fsync);
 
 	ploop->kt_worker = ploop_worker_create(ploop, ploop_worker, "d", 0);
 	if (!ploop->kt_worker)
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 11e50ab1293b..7dbe8819acf7 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -153,11 +153,13 @@ struct ploop_worker {
 	struct llist_head	work_llist;
 	unsigned int		runner_id;
 	atomic_t		inflight_pios;
+	atomic_t		fsync_pios;
 	struct ploop_worker	*next;
 };
 
 struct ploop {
 	struct wait_queue_head dispatcher_wq_data;
+	struct wait_queue_head dispatcher_wq_fsync;
 	struct dm_target *ti;
 #define PLOOP_PRQ_POOL_SIZE 512 /* Twice nr_requests from blk_mq_init_sched() */
 	mempool_t *prq_pool;


More information about the Devel mailing list