[Devel] [RFC PATCH vz9 v6 33/62] dm-ploop: convert wb_batch_list to lockless variant
Alexander Atanasov
alexander.atanasov at virtuozzo.com
Mon Dec 23 17:00:09 MSK 2024
On 13.12.24 14:51, Andrey Zhadchenko wrote:
> This should be merged with other writeback patches
I prefer to keep the list conversion patches separate: they perform a
conversion that is supposed to keep results identical and not change
anything else. Having it as a separate change will let us track down
whether something is off.
>
> On 12/5/24 22:56, Alexander Atanasov wrote:
>> Merging is required to back this change, so do it again.
>>
>> Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
>> ---
>> drivers/md/dm-ploop-map.c | 49 ++++++++++++++++++------------------
>> drivers/md/dm-ploop-target.c | 1 -
>> drivers/md/dm-ploop.h | 1 -
>> 3 files changed, 25 insertions(+), 26 deletions(-)
>>
>> diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
>> index ad4fb84e913f..722a1fbf499e 100644
>> --- a/drivers/md/dm-ploop-map.c
>> +++ b/drivers/md/dm-ploop-map.c
>> @@ -559,7 +559,7 @@ static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
>> WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
>> md->dirty_time = ktime_get();
>> if (!test_and_set_bit(MD_DIRTY, &md->status)) {
>> - list_add(&md->wb_link, &ploop->wb_batch_list);
>> + llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
>> new = true;
>> }
>> write_unlock_irqrestore(&ploop->bat_rwlock, flags);
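
For readers following the conversion: the cast relies on struct llist_node
being a single 'next' pointer that overlays the first field of struct
list_head, so the existing wb_link member can double as an llist entry.
A minimal sketch of that layout assumption (the sketch is mine, not from
the patch):

    /*
     * struct list_head  { struct list_head  *next, *prev; };
     * struct llist_node { struct llist_node *next; };
     *
     * Both begin with a 'next' pointer, so &md->wb_link can be handed
     * to the llist API as long as only 'next' is touched while the
     * entry sits on the lockless list.
     */
    llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
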
>> @@ -1829,36 +1829,36 @@ static void ploop_submit_metadata_writeback(struct ploop *ploop)
>> {
>> ktime_t ktime, ktimeout;
>> LIST_HEAD(ll_skipped);
>> - struct md_page *md, *mtmp;
>> + struct md_page *md;
>> + struct llist_node *pos, *t;
>> + struct llist_node *ll_wb_batch;
>> ktime = ktime_get();
>> ktimeout = ktime_add_ms(ktime, ploop->md_submit_delay_ms);
>> + /* Lock here to protect against md_inflight counting */
>> + write_lock_irq(&ploop->bat_rwlock);
>> + ll_wb_batch = llist_del_all(&ploop->wb_batch_llist);
>> + write_unlock_irq(&ploop->bat_rwlock);
>> /*
>> * Pages are set dirty so no one must touch lists
>> * if new md entries are dirtied they are added at the start of the list
>> */
>> - list_for_each_entry_safe(md, mtmp, &ploop->wb_batch_list, wb_link) {
>> - write_lock_irq(&ploop->bat_rwlock);
>> - list_del_init(&md->wb_link);
>> + llist_for_each_safe(pos, t, ll_wb_batch) {
>> + md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
>> + INIT_LIST_HEAD(&md->wb_link);
>> if (md->high_prio || ktime_after(md->dirty_time, ktimeout)
>> || ploop->force_md_writeback) {
>> /* L1L2 mustn't be redirtyed, when wb in-flight! */
>> WARN_ON_ONCE(!test_bit(MD_DIRTY, &md->status));
>> WARN_ON_ONCE(test_bit(MD_WRITEBACK, &md->status));
>> + md->high_prio = false;
>> set_bit(MD_WRITEBACK, &md->status);
>> clear_bit(MD_DIRTY, &md->status);
>> - md->high_prio = false;
>> - write_unlock_irq(&ploop->bat_rwlock);
>> ploop_index_wb_submit(ploop, md->piwb);
>> } else {
>> - list_add_tail(&md->wb_link, &ll_skipped);
>> - write_unlock_irq(&ploop->bat_rwlock);
>> + llist_add((struct llist_node *)&md->wb_link, &ploop->wb_batch_llist);
>> }
>> }
>> - write_lock_irq(&ploop->bat_rwlock);
>> - list_splice(&ll_skipped, &ploop->wb_batch_list);
>> - write_unlock_irq(&ploop->bat_rwlock);
>> -
>> }
>> static void process_ploop_fsync_work(struct ploop *ploop, struct llist_node *llflush_pios)
>> @@ -1991,18 +1991,19 @@ static void ploop_preflush_endio(struct pio *pio, void *orig_pio_ptr,
>> }
>> }
>> -static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
>> +static int ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
>> {
>> struct pio *flush_pio = pio;
>> struct md_page *md, *n;
>> int md_inflight = 0;
>> + struct llist_node *pos, *t;
>> if (pio->bi_op & REQ_PREFLUSH && (pio->bi_op & REQ_OP_MASK) != REQ_OP_FLUSH) {
>> flush_pio = ploop_alloc_pio(ploop, GFP_NOIO);
>> if (!flush_pio) {
>> pio->bi_status = BLK_STS_RESOURCE;
>> ploop_pio_endio(pio);
>> - return;
>> + return -1;
>> }
>> ploop_init_pio(ploop, REQ_OP_FLUSH, flush_pio);
>> @@ -2013,21 +2014,17 @@ static void ploop_prepare_flush(struct ploop *ploop, struct pio *pio)
>> }
>> write_lock_irq(&ploop->bat_rwlock);
>> -
>> - list_for_each_entry_safe(md, n, &ploop->wb_batch_list, wb_link) {
>> + llist_for_each_safe(pos, t, ploop->wb_batch_llist.first) {
>> + md = list_entry((struct list_head *)pos, typeof(*md), wb_link);
>> md_inflight++;
>> md->piwb->flush_pio = flush_pio;
>> md->high_prio = true;
>> }
>> atomic_set(&flush_pio->md_inflight, md_inflight);
>> -
>> write_unlock_irq(&ploop->bat_rwlock);
>> - if (md_inflight)
>> - ploop_schedule_work(ploop);
>> - else
>> - ploop_dispatch_pios(ploop, flush_pio, NULL);
>> + return md_inflight;
>> }
>> static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
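
Since ploop_prepare_flush() now returns a value instead of dispatching by
itself, it may help to spell out the contract as I read this hunk:

    /*
     * ploop_prepare_flush() return values (my reading of the hunk):
     *  < 0   flush_pio allocation failed; the original pio has already
     *        been ended with BLK_STS_RESOURCE, the caller just returns
     *  > 0   that many metadata writebacks are in flight; the flush is
     *        dispatched later from their completion path
     *  == 0  nothing in flight; the caller queues the pio as usual
     */
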
>> @@ -2056,8 +2053,12 @@ static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
>> ploop_inc_nr_inflight(ploop, pio);
>> if ((pio->bi_op & REQ_OP_MASK) == REQ_OP_FLUSH || pio->bi_op & REQ_PREFLUSH) {
>> - ploop_prepare_flush(ploop, pio);
>> - return;
>> + ret = ploop_prepare_flush(ploop, pio);
>> + if (ret < 0)
>> + return;
>> + if (ret > 0)
>> + goto out;
>> + /* Will add to prepare list and schedule work */
>> }
>> if (pio->queue_list_id == PLOOP_LIST_FLUSH) {
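
And the reason the llist walk in ploop_prepare_flush() still runs under
bat_rwlock: the md_inflight count it publishes must not race with the
decrement on the writeback completion side. A guess at what that pairing
looks like; the helper name ploop_md_wb_endio() and the piwb type are
hypothetical, only piwb->flush_pio and md_inflight come from the patch:

    /* Hypothetical completion side (names are mine, not from the patch) */
    static void ploop_md_wb_endio(struct ploop *ploop,
                                  struct ploop_index_wb *piwb)
    {
        struct pio *flush_pio = piwb->flush_pio;

        /* last counted metadata writeback finished: kick the flush */
        if (flush_pio && atomic_dec_and_test(&flush_pio->md_inflight))
            ploop_dispatch_pios(ploop, flush_pio, NULL);
    }
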
>> diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
>> index 0b691ff031e2..6c60043c0487 100644
>> --- a/drivers/md/dm-ploop-target.c
>> +++ b/drivers/md/dm-ploop-target.c
>> @@ -437,7 +437,6 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
>> INIT_LIST_HEAD(&ploop->cluster_lk_list);
>> init_llist_head(&ploop->wb_batch_llist);
>> - INIT_LIST_HEAD(&ploop->wb_batch_list);
>> ploop->last_md_submit = 0;
>> ploop->md_submit_delay_ms = PLOOP_DEFAULT_METADATA_SUBMIT_DELAY;
>> diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
>> index cdc1f9709a34..1ecd51f2ac3a 100644
>> --- a/drivers/md/dm-ploop.h
>> +++ b/drivers/md/dm-ploop.h
>> @@ -179,7 +179,6 @@ struct ploop {
>> rwlock_t bat_rwlock;
>> struct llist_head wb_batch_llist;
>> - struct list_head wb_batch_list;
>> ktime_t last_md_submit;
>> bool force_md_writeback;
--
Regards,
Alexander Atanasov