[Devel] [RFC PATCH vz9 v2 01/11] ploop: md_pages map all pages at creation time

Alexander Atanasov alexander.atanasov at virtuozzo.com
Mon Oct 14 11:51:06 MSK 2024


An md_page is always present in memory for the whole lifetime of the
device, so md_page->page can always be kept mapped and we no longer
need to perform kmap_atomic/kunmap_atomic around each lookup.

https://virtuozzo.atlassian.net/browse/VSTOR-91659
Suggested-by: Denis V. Lunev <den at openvz.org>
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
---
 drivers/md/dm-ploop-bat.c | 21 ++++++++-------------
 drivers/md/dm-ploop-cmd.c | 33 ++++++++++++---------------------
 drivers/md/dm-ploop-map.c | 35 +++++++++++++----------------------
 drivers/md/dm-ploop.h     | 14 +++++++-------
 4 files changed, 40 insertions(+), 63 deletions(-)

diff --git a/drivers/md/dm-ploop-bat.c b/drivers/md/dm-ploop-bat.c
index cbe48e376097..f28bd5d7b322 100644
--- a/drivers/md/dm-ploop-bat.c
+++ b/drivers/md/dm-ploop-bat.c
@@ -87,6 +87,7 @@ static struct md_page *ploop_alloc_md_page(u32 id)
 	md->bat_levels = levels;
 	md->piwb = NULL;
 	md->page = page;
+	md->kmpage = kmap(page);
 	md->id = id;
 	return md;
 err_page:
@@ -99,6 +100,7 @@ ALLOW_ERROR_INJECTION(ploop_alloc_md_page, NULL);
 
 void ploop_free_md_page(struct md_page *md)
 {
+	kunmap(md->page);
 	put_page(md->page);
 	kfree(md->bat_levels);
 	kfree(md);
@@ -120,9 +122,8 @@ int ploop_prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
 		md = ploop_alloc_md_page(i);
 		if (!md)
 			return -ENOMEM;
-		addr = kmap_atomic(md->page);
+		addr = md->kmpage;
 		memset32(addr, BAT_ENTRY_NONE, PAGE_SIZE / 4);
-		kunmap_atomic(addr);
 
 		__md_page_insert(root, md);
 	}
@@ -143,9 +144,8 @@ bool ploop_try_update_bat_entry(struct ploop *ploop, u32 clu, u8 level, u32 dst_
 	clu = ploop_bat_clu_idx_in_page(clu); /* relative offset */
 
 	if (md->bat_levels[clu] == level) {
-		bat_entries = kmap_atomic(md->page);
+		bat_entries = md->kmpage;
 		bat_entries[clu] = dst_clu;
-		kunmap_atomic(bat_entries);
 		return true;
 	}
 	return false;
@@ -245,7 +245,7 @@ static int ploop_delta_check_header(struct ploop *ploop,
 
 	WARN_ON_ONCE(md0->id != 0);
 
-	d_hdr = kmap(md0->page);
+	d_hdr = md0->kmpage;
 	if (memcmp(d_hdr->m_Sig, ploop->m_Sig, sizeof(d_hdr->m_Sig)) ||
 	    d_hdr->m_Sectors != ploop->m_Sectors ||
 	    d_hdr->m_Type != ploop->m_Type)
@@ -263,7 +263,6 @@ static int ploop_delta_check_header(struct ploop *ploop,
 	*delta_nr_be_ret = delta_nr_be;
 	ret = 0;
 out:
-	kunmap(md0->page);
 	return ret;
 }
 ALLOW_ERROR_INJECTION(ploop_delta_check_header, ERRNO);
@@ -282,7 +281,7 @@ static int ploop_convert_bat_entries(struct ploop *ploop, struct rb_root *md_roo
 
 	page_id = 0;
 	rb_root_for_each_md_page(md_root, md, node) {
-		bat_entries = kmap(md->page);
+		bat_entries = md->kmpage;
 		init_be_iter(nr_be, md->id, &i, &end);
 		WARN_ON_ONCE(page_id != md->id);
 		page_id++;
@@ -295,7 +294,6 @@ static int ploop_convert_bat_entries(struct ploop *ploop, struct rb_root *md_roo
 			if (bat_entries[i] < bat_clusters)
 				ret = -EXDEV;
 		}
-		kunmap(md->page);
 
 		if (ret || page_id == nr_pages)
 			break;
@@ -416,9 +414,9 @@ static void ploop_apply_delta_mappings(struct ploop *ploop,
 
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
-		bat_entries = kmap_atomic(md->page);
+		bat_entries = md->kmpage;
 		if (!is_raw)
-			d_bat_entries = kmap_atomic(d_md->page);
+			d_bat_entries = d_md->kmpage;
 
 		if (is_top_level && md->id == 0 && !is_raw) {
 			/* bat_entries before PLOOP_MAP_OFFSET is hdr */
@@ -453,9 +451,6 @@ static void ploop_apply_delta_mappings(struct ploop *ploop,
 				ploop_set_not_hole(ploop, bat_entries[i]);
 		}
 
-		kunmap_atomic(bat_entries);
-		if (!is_raw)
-			kunmap_atomic(d_bat_entries);
 		if (stop)
 			break;
 		if (!is_raw)
diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index 88519f590148..b1b77e4546c4 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -43,7 +43,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
 	swap(cmd->resize.hb_nr, ploop->hb_nr);
 	ploop_for_each_md_page(ploop, md, node) {
 		ploop_init_be_iter(ploop, md->id, &i, &end);
-		bat_entries = kmap_atomic(md->page);
+		bat_entries = md->kmpage;
 		for (; i <= end; i++) {
 			if (!ploop_md_page_cluster_is_in_top_delta(ploop, md, i))
 				continue;
@@ -54,7 +54,6 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
 				ploop_hole_clear_bit(dst_clu, ploop);
 			}
 		}
-		kunmap_atomic(bat_entries);
 	}
 	write_unlock_irq(&ploop->bat_rwlock);
 }
@@ -161,7 +160,7 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 dst_clu, bool *is_locke
 	read_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
 		ploop_init_be_iter(ploop, md->id, &i, &end);
-		bat_entries = kmap_atomic(md->page);
+		bat_entries = md->kmpage;
 		for (; i <= end; i++) {
 			if (bat_entries[i] != dst_clu)
 				continue;
@@ -170,7 +169,6 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 dst_clu, bool *is_locke
 				break;
 			}
 		}
-		kunmap_atomic(bat_entries);
 		if (clu != UINT_MAX)
 			break;
 	}
@@ -265,9 +263,9 @@ static int ploop_write_zero_cluster_sync(struct ploop *ploop,
 	int i;
 
 	for (i = 0; i < pio->bi_vcnt; i++) {
-		data = kmap_atomic(pio->bi_io_vec[i].bv_page);
+		data = kmap(pio->bi_io_vec[i].bv_page);
 		memset(data, 0, PAGE_SIZE);
-		kunmap_atomic(data);
+		kunmap(data);
 	}
 
 	return ploop_write_cluster_sync(ploop, pio, clu);
@@ -390,12 +388,11 @@ static int ploop_grow_update_header(struct ploop *ploop,
 	clus = DIV_ROUND_UP(size, CLU_SIZE(ploop));
 	first_block_off = CLU_TO_SEC(ploop, clus);
 
-	hdr = kmap_atomic(piwb->bat_page);
+	hdr = piwb->kmpage;
 	/* TODO: head and cylinders */
 	nr_be = hdr->m_Size = cpu_to_le32(cmd->resize.nr_bat_entries);
 	sectors = hdr->m_SizeInSectors_v2 = cpu_to_le64(cmd->resize.new_sectors);
 	offset = hdr->m_FirstBlockOffset = cpu_to_le32(first_block_off);
-	kunmap_atomic(hdr);
 
 	ploop_make_md_wb(ploop, md);
 	init_completion(&comp);
@@ -407,11 +404,10 @@ static int ploop_grow_update_header(struct ploop *ploop,
 	ret = blk_status_to_errno(bi_status);
 	if (!ret) {
 		/* Now update our cached page */
-		hdr = kmap_atomic(cmd->resize.md0->page);
+		hdr = cmd->resize.md0->kmpage;
 		hdr->m_Size = nr_be;
 		hdr->m_SizeInSectors_v2 = sectors;
 		hdr->m_FirstBlockOffset = offset;
-		kunmap_atomic(hdr);
 	} else {
 		PL_ERR("Failed to update hdr: %d", ret);
 	}
@@ -545,9 +541,8 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
 	md0 = ploop_md_page_find(ploop, 0);
 	if (WARN_ON(!md0))
 		return -EIO;
-	hdr = kmap(md0->page);
+	hdr = md0->kmpage;
 	old_sectors = le64_to_cpu(hdr->m_SizeInSectors_v2);
-	kunmap(md0->page);
 
 	if (old_sectors == new_sectors)
 		return 0;
@@ -731,8 +726,8 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
 		init_be_iter(nr_be, md->id, &i, &end);
-		bat_entries = kmap_atomic(md->page);
-		d_bat_entries = kmap_atomic(d_md->page);
+		bat_entries = md->kmpage;
+		d_bat_entries = d_md->kmpage;
 		for (; i <= end; i++) {
 			clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
 			if (clu == nr_be - 1)
@@ -759,14 +754,12 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 			 * 1)next delta (which became renumbered) or
 			 * 2)prev delta (if !@forward).
 			 */
-			bat_entries[i] = d_bat_entries[i];
+			WRITE_ONCE(bat_entries[i], READ_ONCE(d_bat_entries[i]));
 			if (!forward)
 				md->bat_levels[i] = level - 1;
 			else
 				md->bat_levels[i] = level;
 		}
-		kunmap_atomic(bat_entries);
-		kunmap_atomic(d_bat_entries);
 		if (stop)
 			break;
 		d_md = ploop_md_next_entry(d_md);
@@ -989,7 +982,7 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 	/* Flip bat entries */
 	ploop_for_each_md_page(ploop, md, node) {
 		ploop_init_be_iter(ploop, md->id, &i, &end);
-		bat_entries = kmap_atomic(md->page);
+		bat_entries = md->kmpage;
 		for (; i <= end; i++) {
 			if (bat_entries[i] == BAT_ENTRY_NONE)
 				continue;
@@ -1000,7 +993,6 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 				md->bat_levels[i] = level;
 			}
 		}
-		kunmap_atomic(bat_entries);
 	}
 
 	/* FIXME */
@@ -1057,7 +1049,7 @@ static int ploop_check_delta_before_flip(struct ploop *ploop, struct file *file)
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
 		init_be_iter(nr_be, md->id, &i, &end);
-		d_bat_entries = kmap_atomic(d_md->page);
+		d_bat_entries = d_md->kmpage;
 		for (; i <= end; i++) {
 			if (ploop_md_page_cluster_is_in_top_delta(ploop, md, i) &&
 			    d_bat_entries[i] != BAT_ENTRY_NONE) {
@@ -1073,7 +1065,6 @@ static int ploop_check_delta_before_flip(struct ploop *ploop, struct file *file)
 			goto unmap;
 		}
 unmap:
-		kunmap_atomic(d_bat_entries);
 		if (stop)
 			break;
 		d_md = ploop_md_next_entry(d_md);
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 0809867ebd3a..ca2659ba30f5 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -59,6 +59,7 @@ void ploop_index_wb_init(struct ploop_index_wb *piwb, struct ploop *ploop)
 	spin_lock_init(&piwb->lock);
 	piwb->md = NULL;
 	piwb->bat_page = NULL;
+	piwb->kmpage = NULL;
 	piwb->bi_status = 0;
 	INIT_LIST_HEAD(&piwb->ready_data_pios);
 	INIT_LIST_HEAD(&piwb->cow_list);
@@ -722,11 +723,10 @@ static void ploop_release_cluster(struct ploop *ploop, u32 clu)
 
 	clu = ploop_bat_clu_idx_in_page(clu); /* relative to page */
 
-	bat_entries = kmap_atomic(md->page);
+	bat_entries = md->kmpage;
 	dst_clu = bat_entries[clu];
 	bat_entries[clu] = BAT_ENTRY_NONE;
 	md->bat_levels[clu] = 0;
-	kunmap_atomic(bat_entries);
 
 	ploop_hole_set_bit(dst_clu, ploop);
 }
@@ -761,7 +761,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	LIST_HEAD(list);
 
 	BUG_ON(!md);
-	bat_entries = kmap_atomic(md->page);
+	bat_entries = md->kmpage;
 
 	/* Absolute number of first index in page (negative for page#0) */
 	off = piwb->page_id * PAGE_SIZE / sizeof(map_index_t);
@@ -775,7 +775,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	if (!piwb->page_id)
 		i = PLOOP_MAP_OFFSET;
 
-	dst_clu = kmap_atomic(piwb->bat_page);
+	dst_clu = piwb->kmpage;
 	write_lock_irqsave(&ploop->bat_rwlock, flags);
 
 	for (; i < last; i++) {
@@ -806,8 +806,6 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	md->piwb = NULL;
 	list_splice_tail_init(&md->wait_list, &list);
 	write_unlock_irqrestore(&ploop->bat_rwlock, flags);
-	kunmap_atomic(dst_clu);
-	kunmap_atomic(bat_entries);
 
 	if (!list_empty(&list))
 		ploop_dispatch_pios(ploop, NULL, &list);
@@ -815,6 +813,7 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 
 static void ploop_free_piwb(struct ploop_index_wb *piwb)
 {
+	kunmap(piwb->bat_page);
 	ploop_free_pio(piwb->ploop, piwb->pio);
 	put_page(piwb->bat_page);
 	kfree(piwb);
@@ -909,9 +908,10 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 	piwb->pio = pio = ploop_alloc_pio(ploop, GFP_NOIO);
 	if (!page || !pio)
 		goto err;
+	piwb->kmpage = kmap(piwb->bat_page);
 	ploop_init_pio(ploop, REQ_OP_WRITE, pio);
 
-	bat_entries = kmap_atomic(md->page);
+	bat_entries = md->kmpage;
 
 	write_lock_irq(&ploop->bat_rwlock);
 	md->piwb = piwb;
@@ -919,7 +919,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 	write_unlock_irq(&ploop->bat_rwlock);
 
 	piwb->page_id = page_id;
-	to = kmap_atomic(page);
+	to = piwb->kmpage;
 	memcpy((void *)to, bat_entries, PAGE_SIZE);
 
 	/* Absolute number of first index in page (negative for page#0) */
@@ -948,9 +948,6 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 			to[i] = 0;
 	}
 
-	kunmap_atomic(to);
-	kunmap_atomic(bat_entries);
-
 	piwb->type = type;
 	return 0;
 err:
@@ -980,9 +977,8 @@ static void ploop_bat_page_zero_cluster(struct ploop *ploop,
 	/* Cluster index related to the page[page_id] start */
 	clu = ploop_bat_clu_idx_in_page(clu);
 
-	to = kmap_atomic(piwb->bat_page);
+	to = piwb->kmpage;
 	to[clu] = 0;
-	kunmap_atomic(to);
 }
 
 static int ploop_find_dst_clu_bit(struct ploop *ploop, u32 *ret_dst_clu)
@@ -1086,7 +1082,6 @@ ALLOW_ERROR_INJECTION(ploop_allocate_cluster, ERRNO);
 static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb *piwb,
 			       u32 clu, u32 *dst_clu)
 {
-	struct page *page = piwb->bat_page;
 	bool already_alloced = false;
 	map_index_t *to;
 	int ret = 0;
@@ -1094,13 +1089,12 @@ static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb *piwb,
 	/* Cluster index related to the page[page_id] start */
 	clu -= piwb->page_id * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 
-	to = kmap_atomic(page);
+	to = piwb->kmpage;
 	if (to[clu]) {
 		/* Already mapped by one of previous bios */
 		*dst_clu = to[clu];
 		already_alloced = true;
 	}
-	kunmap_atomic(to);
 
 	if (already_alloced)
 		goto out;
@@ -1109,9 +1103,8 @@ static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb *piwb,
 	if (ret < 0)
 		goto out;
 
-	to = kmap_atomic(page);
+	to = piwb->kmpage;
 	to[clu] = *dst_clu;
-	kunmap_atomic(to);
 out:
 	return ret;
 }
@@ -1391,10 +1384,9 @@ static void ploop_submit_cow_index_wb(struct ploop_cow *cow)
 
 	clu -= page_id * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 
-	to = kmap_atomic(piwb->bat_page);
+	to = piwb->kmpage;
 	WARN_ON(to[clu]);
 	to[clu] = cow->dst_clu;
-	kunmap_atomic(to);
 
 	/* Prevent double clearing of holes_bitmap bit on complete_cow() */
 	cow->dst_clu = BAT_ENTRY_NONE;
@@ -1725,7 +1717,7 @@ static void ploop_process_one_discard_pio(struct ploop *ploop, struct pio *pio)
 	/* Cluster index related to the page[page_id] start */
 	clu -= piwb->page_id * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 
-	to = kmap_atomic(piwb->bat_page);
+	to = piwb->kmpage;
 	if (WARN_ON_ONCE(!to[clu])) {
 		pio->bi_status = BLK_STS_IOERR;
 		goto err;
@@ -1733,7 +1725,6 @@ static void ploop_process_one_discard_pio(struct ploop *ploop, struct pio *pio)
 		to[clu] = 0;
 		list_add_tail(&pio->list, &piwb->ready_data_pios);
 	}
-	kunmap_atomic(to);
 
 	if (bat_update_prepared)
 		ploop_md_make_dirty(ploop, md);
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index e693d0ed7fe4..85291d3cbbe7 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -101,6 +101,7 @@ struct ploop_index_wb {
 	struct md_page *md;
 	struct pio *pio;
 	struct page *bat_page;
+	void *kmpage;
 	struct list_head ready_data_pios;
 	struct list_head cow_list;
 	atomic_t count;
@@ -118,6 +119,7 @@ struct md_page {
 #define MD_WRITEBACK	(1U << 2) /* Writeback was submitted */
 	unsigned int status;
 	struct page *page;
+	void *kmpage;
 	u8 *bat_levels;
 	struct list_head wait_list;
 
@@ -430,9 +432,8 @@ static inline u32 ploop_bat_entries(struct ploop *ploop, u32 clu,
 	if (md_ret)
 		*md_ret = md;
 
-	bat_entries = kmap_atomic(md->page);
-	dst_clu = bat_entries[clu];
-	kunmap_atomic(bat_entries);
+	bat_entries = md->kmpage;
+	dst_clu = READ_ONCE(bat_entries[clu]);
 	return dst_clu;
 }
 
@@ -463,11 +464,10 @@ static inline bool ploop_md_page_cluster_is_in_top_delta(struct ploop *ploop,
 		return false;
 	}
 
-	bat_entries = kmap_atomic(md->page);
-	if (bat_entries[clu] == BAT_ENTRY_NONE ||
-	    md->bat_levels[clu] < ploop_top_level(ploop))
+	bat_entries = md->kmpage;
+	if (READ_ONCE(bat_entries[clu]) == BAT_ENTRY_NONE ||
+	    READ_ONCE(md->bat_levels[clu]) < ploop_top_level(ploop))
 		ret = false;
-	kunmap_atomic(bat_entries);
 	return ret;
 }
 
-- 
2.43.0



More information about the Devel mailing list