[Devel] [RFC PATCH vz9 v6 57/62] dm-ploop: properly access nr_bat_entries

Alexander Atanasov alexander.atanasov at virtuozzo.com
Fri Dec 6 00:56:30 MSK 2024


nr_bat_entries is updated while resizing, but some places
read it without holding the bat_lock. To ensure a consistent
value is read, use READ_ONCE there, and WRITE_ONCE when updating.

Note: while the submission of pios is suspended, we might
still have pios in flight.

https://virtuozzo.atlassian.net/browse/VSTOR-91821
Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
---
 drivers/md/dm-ploop-cmd.c | 13 +++++++++----
 drivers/md/dm-ploop-map.c | 12 ++++++------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index 1244cdbb8bb9..aa19040ffc8d 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -462,10 +462,14 @@ static int ploop_process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
 			ploop_hole_set_bit(dst_clu, ploop);
 			dst_clu--;
 		}
-		swap(ploop->hb_nr, cmd->resize.hb_nr);
+		WRITE_ONCE(ploop->hb_nr, cmd->resize.hb_nr);
 	} else {
 		ploop_add_md_pages(ploop, &cmd->resize.md_pages_root);
-		swap(ploop->nr_bat_entries, cmd->resize.nr_bat_entries);
+		/*
+		 * In some places the code reads nr_bat_entries without the
+		 * lock; that is fine for grow, but may be a problem for shrink.
+		 */
+		WRITE_ONCE(ploop->nr_bat_entries, cmd->resize.nr_bat_entries);
 	}
 	spin_unlock_irq(&ploop->bat_lock);
 
@@ -635,7 +639,7 @@ static int ploop_process_merge_latest_snapshot(struct ploop *ploop)
 	int ret = 0;
 	u32 clu;
 
-	for (clu = 0; clu < ploop->nr_bat_entries; clu++) {
+	for (clu = 0; clu < READ_ONCE(ploop->nr_bat_entries); clu++) {
 		if (fatal_signal_pending(current)) {
 			ret = -EINTR;
 			break;
@@ -972,7 +976,8 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 	struct md_page *md;
 	u64 size;
 
-	size = (PLOOP_MAP_OFFSET + ploop->nr_bat_entries) * sizeof(map_index_t);
+	size = (PLOOP_MAP_OFFSET + READ_ONCE(ploop->nr_bat_entries))
+		* sizeof(map_index_t);
         bat_clusters = DIV_ROUND_UP(size, CLU_SIZE(ploop));
 	hb_nr = ploop->hb_nr;
 
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 970f0dc52307..55ed95e39601 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -107,14 +107,14 @@ static int ploop_rq_valid(struct ploop *ploop, struct request *rq)
 	end_byte = to_bytes(sector) + blk_rq_bytes(rq) - 1;
 	end_clu = POS_TO_CLU(ploop, end_byte);
 
-	if (unlikely(end_clu >= ploop->nr_bat_entries)) {
+	if (unlikely(end_clu >= READ_ONCE(ploop->nr_bat_entries))) {
 		/*
 		 * This mustn't happen, since we set max_io_len
 		 * via dm_set_target_max_io_len().
 		 */
 		WARN_ONCE(1, "sec=%llu, size=%u, end_clu=%u, nr=%u\n",
 			  sector, blk_rq_bytes(rq),
-			  end_clu, ploop->nr_bat_entries);
+			  end_clu, READ_ONCE(ploop->nr_bat_entries));
 		return -EINVAL;
 	}
 
@@ -823,8 +823,9 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	off = piwb->page_id * PAGE_SIZE / sizeof(map_index_t);
 	off -= PLOOP_MAP_OFFSET;
 
+	spin_lock_irqsave(&ploop->bat_lock, flags);
 	/* Last and first index in copied page */
-	last = ploop->nr_bat_entries - off;
+	last = READ_ONCE(ploop->nr_bat_entries) - off;
 	if (last > PAGE_SIZE / sizeof(map_index_t))
 		last = PAGE_SIZE / sizeof(map_index_t);
 	i = 0;
@@ -834,7 +835,6 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	dst_clu = piwb->kmpage;
 
 	/* holes bit map requires bat_lock */
-	spin_lock_irqsave(&ploop->bat_lock, flags);
 	spin_lock(&md->md_lock);
 #ifdef PLOOP_DELAYWB
 	if (piwb->type == PIWB_TYPE_ALLOC)
@@ -1020,7 +1020,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 	off -= PLOOP_MAP_OFFSET;
 
 	/* Last and first index in copied page */
-	last = ploop->nr_bat_entries - off;
+	last = READ_ONCE(ploop->nr_bat_entries) - off;
 	if (last > PAGE_SIZE / sizeof(map_index_t)) {
 		last = PAGE_SIZE / sizeof(map_index_t);
 		is_last_page = false;
@@ -2484,7 +2484,7 @@ loff_t ploop_llseek_hole(struct dm_target *ti, loff_t offset, int whence)
 	clu = SEC_TO_CLU(ploop, to_sector(offset) + ploop->skip_off);
 	id = U32_MAX;
 
-	while (clu < ploop->nr_bat_entries) {
+	while (clu < READ_ONCE(ploop->nr_bat_entries)) {
 		if (id != ploop_bat_clu_to_page_nr(clu)) {
 			id = ploop_bat_clu_to_page_nr(clu);
 			md = ploop_md_page_find(ploop, id);
-- 
2.43.0



More information about the Devel mailing list