[Devel] [PATCH rh7 04/12] ploop: Add cluster_size_in_sec() helper

Kirill Tkhai ktkhai at virtuozzo.com
Fri Mar 1 18:13:42 MSK 2019


Introduce a cluster_size_in_sec() helper, which returns the cluster size
in 512-byte sectors, and use it instead of the open-coded
"1 << plo->cluster_log" pattern throughout the driver. No functional
change intended.

Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 drivers/block/ploop/dev.c       |   27 ++++++++++++++-------------
 drivers/block/ploop/io_direct.c |    4 ++--
 drivers/block/ploop/io_kaio.c   |    2 +-
 drivers/block/ploop/sysfs.c     |    2 +-
 include/linux/ploop/ploop.h     |    9 +++++++--
 5 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 820eb4f96df8..42b8a1b7af99 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -531,7 +531,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
 
 	if (test_bit(PLOOP_S_DISCARD, &plo->state) &&
 	    unlikely(bio->bi_rw & REQ_DISCARD)) {
-		int clu_size = 1 << plo->cluster_log;
+		int clu_size = cluster_size_in_sec(plo);
 		int i = (clu_size - 1) & bio->bi_sector;
 		int err = 0;
 
@@ -1736,7 +1736,7 @@ ploop_reloc_sched_read(struct ploop_request *preq, iblock_t iblk)
 	preq->eng_state = PLOOP_E_RELOC_DATA_READ;
 	sbl.head = sbl.tail = preq->aux_bio;
 	delta->io.ops->submit(&delta->io, preq, READ_SYNC,
-			      &sbl, iblk, 1<<plo->cluster_log);
+			      &sbl, iblk, cluster_size_in_sec(plo));
 }
 
 /*
@@ -1793,7 +1793,7 @@ ploop_reuse_free_block(struct ploop_request *preq)
 		sbl.head = sbl.tail = preq->aux_bio;
 
 		top_delta->io.ops->submit(&top_delta->io, preq, preq->req_rw,
-				      &sbl, preq->iblock, 1<<plo->cluster_log);
+				      &sbl, preq->iblock, cluster_size_in_sec(plo));
 	}
 
 	return 0;
@@ -2040,7 +2040,7 @@ ploop_entry_nullify_req(struct ploop_request *preq)
 	}
 
 	top_delta->io.ops->submit(&top_delta->io, preq, preq->req_rw,
-				  &sbl, preq->iblock, 1<<plo->cluster_log);
+				  &sbl, preq->iblock, cluster_size_in_sec(plo));
 	return 0;
 }
 
@@ -2371,7 +2371,7 @@ ploop_entry_request(struct ploop_request * preq)
 		preq->eng_state = PLOOP_E_TRANS_DELTA_READ;
 		sbl.head = sbl.tail = preq->aux_bio;
 		delta->io.ops->submit(&delta->io, preq, READ_SYNC,
-				      &sbl, iblk, 1<<plo->cluster_log);
+				      &sbl, iblk, cluster_size_in_sec(plo));
 		plo->st.bio_trans_copy++;
 		return;
 	}
@@ -2478,7 +2478,7 @@ ploop_entry_request(struct ploop_request * preq)
 				preq->eng_state = PLOOP_E_DELTA_READ;
 				sbl.head = sbl.tail = preq->aux_bio;
 				delta->io.ops->submit(&delta->io, preq, READ_SYNC,
-						      &sbl, iblk, 1<<plo->cluster_log);
+						      &sbl, iblk, cluster_size_in_sec(plo));
 			}
 		} else {
 			if (!whole_block(plo, preq) && map_index_fault(preq) == 0) {
@@ -2705,7 +2705,7 @@ static void ploop_req_state_process(struct ploop_request * preq)
 			sbl.head = sbl.tail = preq->aux_bio;
 			top_delta = ploop_top_delta(plo);
 			top_delta->ops->allocate(top_delta, preq,
-						 &sbl, 1<<plo->cluster_log);
+						 &sbl, cluster_size_in_sec(plo));
 		}
 		break;
 	}
@@ -2742,7 +2742,7 @@ static void ploop_req_state_process(struct ploop_request * preq)
 
 			sbl.head = sbl.tail = preq->aux_bio;
 			top_delta->io.ops->submit(&top_delta->io, preq, preq->req_rw,
-						  &sbl, preq->iblock, 1<<plo->cluster_log);
+						  &sbl, preq->iblock, cluster_size_in_sec(plo));
 		}
 		break;
 	}
@@ -2758,7 +2758,7 @@ static void ploop_req_state_process(struct ploop_request * preq)
 		plo->st.bio_out++;
 		top_delta->io.ops->submit(&top_delta->io, preq, preq->req_rw,
 					  &sbl, preq->iblock,
-					  1<<plo->cluster_log);
+					  cluster_size_in_sec(plo));
 		break;
 	}
 	case PLOOP_E_RELOC_DATA_READ:
@@ -2786,10 +2786,10 @@ static void ploop_req_state_process(struct ploop_request * preq)
 			top_delta->io.ops->submit(&top_delta->io, preq,
 						  preq->req_rw, &sbl,
 						  preq->iblock,
-						  1<<plo->cluster_log);
+						  cluster_size_in_sec(plo));
 		} else {
 			top_delta->ops->allocate(top_delta, preq, &sbl,
-						 1<<plo->cluster_log);
+						 cluster_size_in_sec(plo));
 		}
 		break;
 	}
@@ -2843,13 +2843,14 @@ static void ploop_req_state_process(struct ploop_request * preq)
 			 * we can be here only if merge is in progress and
 			 * merge can't happen concurrently with ballooning
 			 */
-			top_delta->ops->allocate(top_delta, preq, &sbl, 1<<plo->cluster_log);
+			top_delta->ops->allocate(top_delta, preq, &sbl,
+						 cluster_size_in_sec(plo));
 			plo->st.bio_trans_alloc++;
 		} else {
 			preq->eng_state = PLOOP_E_COMPLETE;
 			preq->iblock = iblk;
 			top_delta->io.ops->submit(&top_delta->io, preq, preq->req_rw,
-						  &sbl, iblk, 1<<plo->cluster_log);
+						  &sbl, iblk, cluster_size_in_sec(plo));
 		}
 		break;
 	}
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index 64e45d87524f..775ab7f64c03 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -398,7 +398,7 @@ cached_submit(struct ploop_io *io, iblock_t iblk, struct ploop_request * preq,
 
 	if (may_fallocate) {
 		sector_t sec = (sector_t)iblk << preq->plo->cluster_log;
-		sector_t len = 1 << preq->plo->cluster_log;
+		sector_t len = cluster_size_in_sec(preq->plo);
 		struct extent_map * em = extent_lookup_create(io, sec, len);
 
 		if (unlikely(IS_ERR(em))) {
@@ -551,7 +551,7 @@ dio_submit_pad(struct ploop_io *io, struct ploop_request * preq,
 
 	/* sec..end_sec is the range which we are going to write */
 	sec = (sector_t)preq->iblock << preq->plo->cluster_log;
-	end_sec = sec + (1 << preq->plo->cluster_log);
+	end_sec = sec + cluster_size_in_sec(preq->plo);
 
 	/* start..end is data that we have. The rest must be zero padded. */
 	start = sec + (sbl->head->bi_sector & ((1<<preq->plo->cluster_log) - 1));
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index 81b42fd254a0..df55533458a3 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -445,7 +445,7 @@ static int kaio_resubmit(struct ploop_request * preq)
 			struct bio_list tbl;
 			tbl.head = tbl.tail = preq->aux_bio;
 			kaio_submit(&delta->io, preq, preq->req_rw, &tbl,
-				    preq->iblock, 1<<preq->plo->cluster_log);
+				    preq->iblock, cluster_size_in_sec(preq->plo));
 		} else {
 			kaio_submit(&delta->io, preq, preq->req_rw, &preq->bl,
 				    preq->iblock, preq->req_size);
diff --git a/drivers/block/ploop/sysfs.c b/drivers/block/ploop/sysfs.c
index 48998373ae04..11326613d75c 100644
--- a/drivers/block/ploop/sysfs.c
+++ b/drivers/block/ploop/sysfs.c
@@ -272,7 +272,7 @@ static ssize_t pstat_store(struct kobject * kobj, struct attribute * attr,
 
 static u32 show_block_size(struct ploop_device * plo)
 {
-	return 1 << plo->cluster_log;
+	return cluster_size_in_sec(plo);
 }
 
 static u32 show_fmt_version(struct ploop_device * plo)
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index bdf960f0c58f..58154219d7cb 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -638,6 +638,11 @@ static inline unsigned int cluster_size_in_bytes(struct ploop_device *plo)
 	return 1 << (plo->cluster_log + 9);
 }
 
+static inline unsigned int cluster_size_in_sec(struct ploop_device *plo)
+{
+	return 1 << (plo->cluster_log);
+}
+
 void ploop_complete_io_state(struct ploop_request * preq);
 void ploop_fail_request(struct ploop_request * preq, int err);
 void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list);
@@ -843,9 +848,9 @@ static inline int ploop_map_log(struct ploop_device *plo)
 
 static inline bool whole_block(struct ploop_device * plo, struct ploop_request *preq)
 {
-	if (preq->req_size != (1<<plo->cluster_log))
+	if (preq->req_size != cluster_size_in_sec(plo))
 		return 0;
-	return !(preq->req_sector & ((1<<plo->cluster_log) - 1));
+	return !(preq->req_sector & (cluster_size_in_sec(plo) - 1));
 }
 
 struct map_node;
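
For reference, a stand-alone sketch of the unit convention the new helper
encodes (plain user-space C, not part of the patch; the struct is reduced
to the single field the helpers read and the cluster_log value is only an
example): cluster_log is the log2 of the cluster size in 512-byte sectors,
so cluster_size_in_bytes() equals cluster_size_in_sec() shifted left by 9.

/* Stand-alone illustration, not kernel code. */
#include <assert.h>
#include <stdio.h>

struct ploop_device {
	unsigned int cluster_log;	/* log2 of cluster size, in 512-byte sectors */
};

static unsigned int cluster_size_in_sec(struct ploop_device *plo)
{
	return 1 << plo->cluster_log;		/* sectors per cluster */
}

static unsigned int cluster_size_in_bytes(struct ploop_device *plo)
{
	return 1 << (plo->cluster_log + 9);	/* one sector is 1 << 9 = 512 bytes */
}

int main(void)
{
	struct ploop_device plo = { .cluster_log = 11 };	/* e.g. 2048 sectors = 1 MiB */

	assert(cluster_size_in_bytes(&plo) == cluster_size_in_sec(&plo) << 9);
	printf("cluster: %u sectors, %u bytes\n",
	       cluster_size_in_sec(&plo), cluster_size_in_bytes(&plo));
	return 0;
}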
