[Devel] [PATCH rh7] ploop: add a separate queue for discard bio-s (v2)
Andrey Smetanin
asmetanin at odin.com
Wed May 13 04:58:09 PDT 2015
When I added support for discard requests, process_bio_queue() was
called only from ploop_thread(), so I used ploop_quiesce()/ploop_relax()
for synchronization. Now it is called from ploop_make_request() too,
so that synchronization no longer works.
The race was added by
diff-ploop-converting-bio-into-ploop-request-in-function-ploop_make_request.
This patch adds a separate queue for discard requests, which is handled
only from ploop_thread(). In addition, we gain the ability to postpone
discard bio-s while we are handling others, so we will not fail if a bio
is received while another one is being processed. In the future this will
allow us to handle more than one bio concurrently.
v2: fix comments from Maxim
> Also, ploop_preq_drop() and ploop_complete_request() must wake up ploop-thread
> if !bio_list_empty(&plo->bio_discard_list) as well.
https://jira.sw.ru/browse/PSBM-27676
Note that this is a plain (no logic changes) RHEL7 port of Andrew Vagin's
original RHEL6 patch.
Signed-off-by: Andrew Vagin <avagin at openvz.org>
Signed-off-by: Andrey Smetanin <asmetanin at odin.com>
---
drivers/block/ploop/dev.c | 54 ++++++++++++++++++++++++++++++++++--------
drivers/block/ploop/freeblks.c | 5 ++++
drivers/block/ploop/freeblks.h | 1 +
drivers/block/ploop/sysfs.c | 6 +++++
include/linux/ploop/ploop.h | 2 ++
5 files changed, 58 insertions(+), 10 deletions(-)
diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 5a3a5ec..d70eaf1 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -104,8 +104,9 @@ static void mitigation_timeout(unsigned long data)
spin_lock_irq(&plo->lock);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
(!list_empty(&plo->entry_queue) ||
- (plo->bio_head && !list_empty(&plo->free_list))) &&
- waitqueue_active(&plo->waitq))
+ ((plo->bio_head || !bio_list_empty(&plo->bio_discard_list)) &&
+ !list_empty(&plo->free_list))) &&
+ waitqueue_active(&plo->waitq))
wake_up_interruptible(&plo->waitq);
spin_unlock_irq(&plo->lock);
}
@@ -224,7 +225,8 @@ void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list,
if (waitqueue_active(&plo->req_waitq))
wake_up(&plo->req_waitq);
else if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
- waitqueue_active(&plo->waitq) && plo->bio_head)
+ waitqueue_active(&plo->waitq) &&
+ (plo->bio_head || !bio_list_empty(&plo->bio_discard_list)))
wake_up_interruptible(&plo->waitq);
ploop_uncongest(plo);
@@ -506,6 +508,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
BIO_ENDIO(bio, err);
list_add(&preq->list, &plo->free_list);
plo->bio_qlen--;
+ plo->bio_discard_qlen--;
plo->bio_total--;
return;
}
@@ -743,6 +746,28 @@ static void ploop_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(cb);
}
+static void
+process_discard_bio_queue(struct ploop_device *plo, struct list_head *drop_list)
+{
+ bool discard = test_bit(PLOOP_S_DISCARD, &plo->state);
+ while (!list_empty(&plo->free_list)) {
+ struct bio *tmp;
+
+ /* Only one discard bio can be handled concurrently */
+ if (discard && ploop_discard_is_inprogress(plo->fbd))
+ return;
+
+ tmp = bio_list_pop(&plo->bio_discard_list);
+ if (tmp == NULL)
+ break;
+
+ /* If PLOOP_S_DISCARD isn't set, ploop_bio_queue
+ * will complete it with a proper error.
+ */
+ ploop_bio_queue(plo, tmp, drop_list);
+ }
+}
+
static void ploop_make_request(struct request_queue *q, struct bio *bio)
{
struct bio * nbio;
@@ -830,6 +855,12 @@ static void ploop_make_request(struct request_queue *q, struct bio *bio)
return;
}
+ if (bio->bi_rw & REQ_DISCARD) {
+ bio_list_add(&plo->bio_discard_list, bio);
+ plo->bio_discard_qlen++;
+ goto queued;
+ }
+
/* Write tracking in fast path does not work at the moment. */
if (unlikely(test_bit(PLOOP_S_TRACK, &plo->state) &&
(bio->bi_rw & WRITE)))
@@ -851,9 +882,6 @@ static void ploop_make_request(struct request_queue *q, struct bio *bio)
if (unlikely(nbio == NULL))
goto queue;
- if (bio->bi_rw & REQ_DISCARD)
- goto queue;
-
/* Try to merge before checking for fastpath. Maybe, this
* is not wise.
*/
@@ -935,6 +963,7 @@ queue:
/* second chance to merge requests */
process_bio_queue(plo, &drop_list);
+queued:
/* If main thread is waiting for requests, wake it up.
* But try to mitigate wakeups, delaying wakeup for some short
* time.
@@ -1253,7 +1282,9 @@ static void ploop_complete_request(struct ploop_request * preq)
if (waitqueue_active(&plo->req_waitq))
wake_up(&plo->req_waitq);
else if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
- waitqueue_active(&plo->waitq) && plo->bio_head)
+ waitqueue_active(&plo->waitq) &&
+ (plo->bio_head ||
+ !bio_list_empty(&plo->bio_discard_list)))
wake_up_interruptible(&plo->waitq);
}
plo->bio_total -= nr_completed;
@@ -2479,7 +2510,8 @@ static void ploop_wait(struct ploop_device * plo, int once, struct blk_plug *plu
(!test_bit(PLOOP_S_ATTENTION, &plo->state) ||
!plo->active_reqs))
break;
- } else if (plo->bio_head) {
+ } else if (plo->bio_head ||
+ !bio_list_empty(&plo->bio_discard_list)) {
/* ready_queue and entry_queue are empty, but
* bio list not. Obviously, we'd like to process
* bio_list instead of sleeping */
@@ -2579,7 +2611,7 @@ static int ploop_thread(void * data)
BUG_ON (!list_empty(&drop_list));
process_bio_queue(plo, &drop_list);
-
+ process_discard_bio_queue(plo, &drop_list);
if (!list_empty(&drop_list)) {
spin_unlock_irq(&plo->lock);
ploop_preq_drop(plo, &drop_list, 1);
@@ -2655,7 +2687,8 @@ static int ploop_thread(void * data)
* no requests are in process or in entry queue
*/
if (kthread_should_stop() && !plo->active_reqs &&
- list_empty(&plo->entry_queue) && !plo->bio_head)
+ list_empty(&plo->entry_queue) && !plo->bio_head &&
+ bio_list_empty(&plo->bio_discard_list))
break;
wait_more:
@@ -4476,6 +4509,7 @@ static struct ploop_device *__ploop_dev_alloc(int index)
map_init(plo, &plo->map);
track_init(plo);
KOBJECT_INIT(&plo->kobj, &ploop_ktype);
+ bio_list_init(&plo->bio_discard_list);
atomic_inc(&plo_count);
dk->major = ploop_major;
diff --git a/drivers/block/ploop/freeblks.c b/drivers/block/ploop/freeblks.c
index 13eb57e..569cb94 100644
--- a/drivers/block/ploop/freeblks.c
+++ b/drivers/block/ploop/freeblks.c
@@ -1079,3 +1079,8 @@ int ploop_discard_add_bio(struct ploop_freeblks_desc *fbd, struct bio *bio)
return 0;
}
+
+int ploop_discard_is_inprogress(struct ploop_freeblks_desc *fbd)
+{
+ return fbd && fbd->fbd_dbl.head != NULL;
+}
diff --git a/drivers/block/ploop/freeblks.h b/drivers/block/ploop/freeblks.h
index 5b03eec..63591d0 100644
--- a/drivers/block/ploop/freeblks.h
+++ b/drivers/block/ploop/freeblks.h
@@ -12,6 +12,7 @@ int ploop_fb_add_reloc_extent(struct ploop_freeblks_desc *fbd, cluster_t clu, ib
void ploop_fb_lost_range_init(struct ploop_freeblks_desc *fbd, iblock_t first_lost_iblk);
void ploop_fb_relocation_start(struct ploop_freeblks_desc *fbd, __u32 n_scanned);
int ploop_discard_add_bio(struct ploop_freeblks_desc *fbd, struct bio *bio);
+int ploop_discard_is_inprogress(struct ploop_freeblks_desc *fbd);
/* avoid direct access to freeblks internals */
int ploop_fb_get_n_relocated(struct ploop_freeblks_desc *fbd);
diff --git a/drivers/block/ploop/sysfs.c b/drivers/block/ploop/sysfs.c
index 5c31826..0296e1c 100644
--- a/drivers/block/ploop/sysfs.c
+++ b/drivers/block/ploop/sysfs.c
@@ -269,6 +269,11 @@ static u32 show_queued_bios(struct ploop_device * plo)
return plo->bio_qlen;
}
+static u32 show_discard_bios(struct ploop_device *plo)
+{
+ return plo->bio_discard_qlen;
+}
+
static u32 show_active_reqs(struct ploop_device * plo)
{
return plo->active_reqs;
@@ -453,6 +458,7 @@ static struct attribute *state_attributes[] = {
_A(fmt_version),
_A(total_bios),
_A(queued_bios),
+ _A(discard_bios),
_A(active_reqs),
_A(entry_reqs),
_A(entry_read_sync_reqs),
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index 92ec3e1..098d966 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -346,6 +346,8 @@ struct ploop_device
struct bio *bio_head;
struct bio *bio_tail;
struct bio *bio_sync;
+ struct bio_list bio_discard_list;
+ int bio_discard_qlen;
int bio_qlen;
int bio_total;
--
1.9.3
More information about the Devel
mailing list