[Devel] [PATCH rh7 3/4] ploop: introduce plo->blockable_reqs counter

Maxim Patlasov mpatlasov at virtuozzo.com
Mon Jul 11 21:59:37 PDT 2016


The counter represents the number of ploop requests that can
potentially be blocked due to push_backup; let's call them
"blockable" requests. In other words, these are the requests
whose completion may come to depend on the userspace backup tool.

We claim a preq as "blockable" if, at the time of converting an
incoming bio to the preq, we observe the corresponding bit in
pbd->ppb_map set and the corresponding bit in pbd->reported_map
clear.
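
For a non-empty WRITE bio, the rule is schematically as follows
(a simplified sketch: it ignores pbd->ppb_lock and the
PLOOP_PB_ALIVE state check that the real
ploop_pb_check_and_clear_bit() below performs):

	if (check_bit_in_map(pbd->ppb_map, pbd->ppb_block_max, clu) &&
	    !check_bit_in_map(pbd->reported_map, pbd->ppb_block_max, clu))
		ploop_set_blockable(plo, preq);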

In the case of in-flight conversion (ploop_make_request() calling
process_bio_queue()), the decision is postponed until the ploop
thread processes the preq in ploop_req_state_process(). This is
intentional, to avoid complicating the locking scheme.
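
Schematically, the two call sites of process_bio_queue() now
differ only in the new account_blockable flag (see the hunks
below):

	/* in ploop_make_request(): in-flight conversion, defer the decision */
	process_bio_queue(plo, &drop_list, 0);

	/* in ploop_thread(): running in the ploop thread, safe to account */
	process_bio_queue(plo, &drop_list, 1);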

The counter will be used by the next patch of this patch-set.

https://jira.sw.ru/browse/PSBM-49454

Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
---
 drivers/block/ploop/dev.c         |   36 ++++++++++++++++++++++++++++++------
 drivers/block/ploop/push_backup.c |   22 ++++++++++++++++++++++
 drivers/block/ploop/push_backup.h |    1 +
 drivers/block/ploop/sysfs.c       |    6 ++++++
 include/linux/ploop/ploop.h       |    2 ++
 5 files changed, 61 insertions(+), 6 deletions(-)

diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index cc33b2d..6795b95 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -227,6 +227,20 @@ static inline void preq_unlink(struct ploop_request * preq,
 	list_add(&preq->list, drop_list);
 }
 
+static void ploop_set_blockable(struct ploop_device *plo,
+				struct ploop_request *preq)
+{
+	if (!test_and_set_bit(PLOOP_REQ_BLOCKABLE, &preq->state))
+		plo->blockable_reqs++;
+}
+
+static void ploop_test_and_clear_blockable(struct ploop_device *plo,
+					   struct ploop_request *preq)
+{
+	if (test_and_clear_bit(PLOOP_REQ_BLOCKABLE, &preq->state))
+		plo->blockable_reqs--;
+}
+
 /* always called with plo->lock released */
 void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list,
 		      int keep_locked)
@@ -242,6 +256,7 @@ void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list,
 		}
 
 		BUG_ON (test_bit(PLOOP_REQ_ZERO, &preq->state));
+		ploop_test_and_clear_blockable(plo, preq);
 		drop_qlen++;
 	}
 
@@ -489,7 +504,7 @@ insert_entry_tree(struct ploop_device * plo, struct ploop_request * preq0,
 
 static void
 ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
-		struct list_head *drop_list)
+		struct list_head *drop_list, int account_blockable)
 {
 	struct ploop_request * preq;
 
@@ -511,6 +526,10 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
 	preq->iblock = 0;
 	preq->prealloc_size = 0;
 
+	if (account_blockable && (bio->bi_rw & REQ_WRITE) && bio->bi_size &&
+	    ploop_pb_check_and_clear_bit(plo->pbd, preq->req_cluster))
+		ploop_set_blockable(plo, preq);
+
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		int clu_size = 1 << plo->cluster_log;
 		int i = (clu_size - 1) & bio->bi_sector;
@@ -734,7 +753,9 @@ preallocate_bio(struct bio * orig_bio, struct ploop_device * plo)
 	return nbio;
 }
 
-static void process_bio_queue(struct ploop_device * plo, struct list_head *drop_list)
+static void process_bio_queue(struct ploop_device * plo,
+			      struct list_head *drop_list,
+			      int account_blockable)
 {
 	while (plo->bio_head && !list_empty(&plo->free_list)) {
 		struct bio *tmp = plo->bio_head;
@@ -744,7 +765,7 @@ static void process_bio_queue(struct ploop_device * plo, struct list_head *drop_
 		if (!plo->bio_head)
 			plo->bio_tail = NULL;
 
-		ploop_bio_queue(plo, tmp, drop_list);
+		ploop_bio_queue(plo, tmp, drop_list, account_blockable);
 	}
 }
 
@@ -796,7 +817,7 @@ process_discard_bio_queue(struct ploop_device * plo, struct list_head *drop_list
 		/* If PLOOP_S_DISCARD isn't set, ploop_bio_queue
 		 * will complete it with a proper error.
 		 */
-		ploop_bio_queue(plo, tmp, drop_list);
+		ploop_bio_queue(plo, tmp, drop_list, 0);
 	}
 }
 
@@ -1001,7 +1022,7 @@ queue:
 	ploop_congest(plo);
 
 	/* second chance to merge requests */
-	process_bio_queue(plo, &drop_list);
+	process_bio_queue(plo, &drop_list, 0);
 
 queued:
 	/* If main thread is waiting for requests, wake it up.
@@ -1371,6 +1392,7 @@ static void ploop_complete_request(struct ploop_request * preq)
 
 	del_lockout(preq);
 	del_pb_lockout(preq); /* preq may die via ploop_fail_immediate() */
+	ploop_test_and_clear_blockable(plo, preq);
 
 	if (!list_empty(&preq->delay_list))
 		list_splice_init(&preq->delay_list, plo->ready_queue.prev);
@@ -2139,6 +2161,7 @@ restart:
 		} else {
 			/* needn't lock because only ploop_thread accesses */
 			ploop_add_pb_lockout(preq);
+			ploop_set_blockable(plo, preq);
 			/*
 			 * preq IN: preq is in ppb_pending tree waiting for
 			 * out-of-band push_backup processing by userspace ...
@@ -2152,6 +2175,7 @@ restart:
 		 * userspace done; preq was re-scheduled
 		 */
 		ploop_pb_clear_bit(plo->pbd, preq->req_cluster);
+		ploop_test_and_clear_blockable(plo, preq);
 
 		del_pb_lockout(preq);
 		spin_lock_irq(&plo->lock);
@@ -2855,7 +2879,7 @@ static int ploop_thread(void * data)
 	again:
 		BUG_ON (!list_empty(&drop_list));
 
-		process_bio_queue(plo, &drop_list);
+		process_bio_queue(plo, &drop_list, 1);
 		process_discard_bio_queue(plo, &drop_list);
 
 		if (!list_empty(&drop_list)) {
diff --git a/drivers/block/ploop/push_backup.c b/drivers/block/ploop/push_backup.c
index 4e2404c..c58aadf 100644
--- a/drivers/block/ploop/push_backup.c
+++ b/drivers/block/ploop/push_backup.c
@@ -657,6 +657,28 @@ int ploop_pb_preq_add_pending(struct ploop_pushbackup_desc *pbd,
 	return 0;
 }
 
+bool ploop_pb_check_and_clear_bit(struct ploop_pushbackup_desc *pbd,
+				  cluster_t clu)
+{
+	if (!pbd)
+		return false;
+
+	if (!check_bit_in_map(pbd->ppb_map, pbd->ppb_block_max, clu))
+		return false;
+
+	spin_lock(&pbd->ppb_lock);
+
+	if (pbd->ppb_state != PLOOP_PB_ALIVE ||
+	    check_bit_in_map(pbd->reported_map, pbd->ppb_block_max, clu)) {
+		spin_unlock(&pbd->ppb_lock);
+		ploop_pb_clear_bit(pbd, clu);
+		return false;
+	}
+
+	spin_unlock(&pbd->ppb_lock);
+	return true;
+}
+
 /* Always serialized by plo->ctl_mutex */
 unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
 {
diff --git a/drivers/block/ploop/push_backup.h b/drivers/block/ploop/push_backup.h
index 5333537..0d479e0 100644
--- a/drivers/block/ploop/push_backup.h
+++ b/drivers/block/ploop/push_backup.h
@@ -24,6 +24,7 @@ void ploop_pb_put_reported(struct ploop_pushbackup_desc *pbd,
 
 void ploop_pb_clear_bit(struct ploop_pushbackup_desc *pbd, cluster_t clu);
 bool ploop_pb_check_bit(struct ploop_pushbackup_desc *pbd, cluster_t clu);
+bool ploop_pb_check_and_clear_bit(struct ploop_pushbackup_desc *pbd, cluster_t clu);
 
 int ploop_pb_preq_add_pending(struct ploop_pushbackup_desc *pbd,
 			       struct ploop_request *preq);
diff --git a/drivers/block/ploop/sysfs.c b/drivers/block/ploop/sysfs.c
index c062c1e..2160fb31 100644
--- a/drivers/block/ploop/sysfs.c
+++ b/drivers/block/ploop/sysfs.c
@@ -435,6 +435,11 @@ static u32 show_free_qmax(struct ploop_device * plo)
 	return plo->free_qmax;
 }
 
+static u32 show_blockable_reqs(struct ploop_device * plo)
+{
+	return plo->blockable_reqs;
+}
+
 #define _TUNE_U32(_name)				\
 static u32 show_##_name(struct ploop_device * plo)	\
 {							\
@@ -519,6 +524,7 @@ static struct attribute *state_attributes[] = {
 	_A(open_count),
 	_A(free_reqs),
 	_A(free_qmax),
+	_A(blockable_reqs),
 	NULL
 };
 
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index 87a530e..43bba66 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -366,6 +366,7 @@ struct ploop_device
 	int			read_sync_reqs;
 	int			free_qlen; /* len of free_list */
 	int			free_qmax; /* max len of free_list */
+	int			blockable_reqs; /* depends on userspace tool */
 
 	struct bio		*bio_head;
 	struct bio		*bio_tail;
@@ -484,6 +485,7 @@ enum
 	PLOOP_REQ_PUSH_BACKUP, /* preq was ACKed by userspace push_backup */
 	PLOOP_REQ_FSYNC_DONE,  /* fsync_thread() performed f_op->fsync() */
 	PLOOP_REQ_ISSUE_FLUSH, /* preq needs ->issue_flush before completing */
+	PLOOP_REQ_BLOCKABLE,  /* preq was accounted in plo->blockable_reqs */
 };
 
 #define PLOOP_REQ_MERGE_FL (1 << PLOOP_REQ_MERGE)


