[Devel] [PATCH RHEL7 COMMIT] Revert "ploop: debug preq->list"
Konstantin Khorenko
khorenko at virtuozzo.com
Tue Jul 25 14:13:21 MSK 2017
The commit is pushed to "branch-rh7-3.10.0-514.26.1.vz7.33.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.26.1.vz7.33.16
------>
commit c88157d0b05fe8770a136f02075bd8d1ab338a0c
Author: Konstantin Khorenko <khorenko at virtuozzo.com>
Date: Tue Jul 25 15:11:03 2017 +0400
Revert "ploop: debug preq->list"
This reverts commit 2d58a9112f9e37a93485fdef25cf9fc05248a9c0.
We've found the cause of the ploop list corruption,
so the debug patch is no longer needed; revert it.
https://jira.sw.ru/browse/PSBM-67513
https://jira.sw.ru/browse/PSBM-66831
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
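For context, the reverted instrumentation tagged each ploop_request with a
(owner, who) pair packed into preq->dbg_state and checked it on every list
move: preq_dbg_acquire() complained if the request was already owned by some
list, preq_dbg_release() if it was released by the wrong owner. Below is a
rough userspace rendering of that pattern, assuming the semantics of the
removed ploop.h hunks further down; the kernel variant used atomic_xchg(),
printk() and dump_stack(), and the owner values here are illustrative:

#include <stdatomic.h>
#include <stdio.h>

/* Pack "which list owns this request" and "which call site put it there"
 * into one word, as the reverted patch did. */
#define PREQ_DBG_STATE(owner, who) (((owner) << 16) | (who))
#define PREQ_DBG_OWNER(state)      ((state) >> 16)
#define PREQ_DBG_WHO(state)        ((state) & 0xffff)

struct preq {
	atomic_uint dbg_state;	/* 0 == owned by nobody */
};

/* Claim ownership; warn if somebody else still held the request. */
static void preq_dbg_acquire(struct preq *p, unsigned owner, unsigned who)
{
	unsigned old = atomic_exchange(&p->dbg_state,
				       PREQ_DBG_STATE(owner, who));
	if (old)
		fprintf(stderr, "acquire(%p): already owned by %u (who=%u)\n",
			(void *)p, PREQ_DBG_OWNER(old), PREQ_DBG_WHO(old));
}

/* Drop ownership; warn if the recorded owner is not the expected one. */
static void preq_dbg_release(struct preq *p, unsigned owner)
{
	unsigned old = atomic_exchange(&p->dbg_state, 0);
	if (PREQ_DBG_OWNER(old) != owner)
		fprintf(stderr, "release(%p): expected owner %u, got %u (who=%u)\n",
			(void *)p, owner, PREQ_DBG_OWNER(old), PREQ_DBG_WHO(old));
}

int main(void)
{
	enum { OWNER_FREE_LIST = 1, OWNER_READY_QUEUE = 2 };
	struct preq p = { 0 };

	preq_dbg_acquire(&p, OWNER_FREE_LIST, 10);
	preq_dbg_acquire(&p, OWNER_READY_QUEUE, 20); /* double add -> warning */
	preq_dbg_release(&p, OWNER_FREE_LIST);       /* wrong owner -> warning */
	return 0;
}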
drivers/block/ploop/dev.c | 96 +++++++--------------------------
drivers/block/ploop/discard.c | 2 -
drivers/block/ploop/freeblks.c | 10 +---
drivers/block/ploop/io_direct.c | 5 +-
drivers/block/ploop/io_kaio.c | 17 ++----
drivers/block/ploop/map.c | 36 ++++---------
drivers/block/ploop/push_backup.c | 51 ++++++------------
include/linux/ploop/ploop.h | 110 +-------------------------------------
8 files changed, 53 insertions(+), 274 deletions(-)
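Note also that several hunks switch list_del_init() back to plain list_del().
The _init variant re-initializes the node into a self-linked empty list, so a
stale node is detectable via list_empty() and harmless to reuse; plain
list_del() leaves next/prev dangling (the kernel poisons them), so a later
operation on a stale node corrupts whichever list those pointers reference.
A minimal userspace sketch of the difference, assuming the usual
<linux/list.h> semantics:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void __list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Plain list_del(): the node keeps stale next/prev pointers
 * (the real kernel writes LIST_POISON1/2 into them instead). */
static void list_del(struct list_head *n) { __list_del(n); }

/* list_del_init(): the node becomes a valid empty list again. */
static void list_del_init(struct list_head *n)
{
	__list_del(n);
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head head, a, b;
	INIT_LIST_HEAD(&head);
	list_add_tail(&a, &head);
	list_add_tail(&b, &head);

	list_del(&a);		/* a->next/a->prev still point into the list */
	list_del_init(&b);	/* b is a valid empty list again */
	printf("b detached and self-linked: %d\n", list_empty(&b));
	return 0;
}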
diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 2f9a571..eab6cb0 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -190,7 +190,6 @@ ploop_alloc_request(struct ploop_device * plo)
}
preq = list_entry(plo->free_list.next, struct ploop_request, list);
- preq_dbg_release(preq, OWNER_FREE_LIST);
list_del_init(&preq->list);
plo->free_qlen--;
ploop_congest(plo);
@@ -209,12 +208,10 @@ static void ploop_grab_iocontext(struct bio *bio)
/* always called with plo->lock held */
static inline void preq_unlink(struct ploop_request * preq,
- struct list_head *drop_list, unsigned who)
+ struct list_head *drop_list)
{
- preq_dbg_release(preq, OWNER_ENTRY_QUEUE);
- list_del_init(&preq->list);
+ list_del(&preq->list);
ploop_entry_qlen_dec(preq);
- preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, who);
list_add(&preq->list, drop_list);
}
@@ -248,8 +245,6 @@ void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list)
BUG_ON (test_bit(PLOOP_REQ_ZERO, &preq->state));
ploop_test_and_clear_blockable(plo, preq);
drop_qlen++;
- preq_dbg_release(preq, OWNER_TEMP_DROP_LIST);
- preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_PREQ_DROP);
}
spin_lock_irq(&plo->lock);
@@ -304,7 +299,7 @@ static void overlap_forward(struct ploop_device * plo,
preq_set_sync_bit(preq);
merge_rw_flags_to_req(preq1->req_rw, preq);
rb_erase(&preq1->lockout_link, &plo->entry_tree[preq1->req_rw & WRITE]);
- preq_unlink(preq1, drop_list, WHO_OVERLAP_FORWARD);
+ preq_unlink(preq1, drop_list);
plo->st.coal_mforw++;
}
@@ -335,7 +330,7 @@ static void overlap_backward(struct ploop_device * plo,
preq_set_sync_bit(preq);
merge_rw_flags_to_req(preq1->req_rw, preq);
rb_erase(&preq1->lockout_link, &plo->entry_tree[preq->req_rw & WRITE]);
- preq_unlink(preq1, drop_list, WHO_OVERLAP_BACKWARD);
+ preq_unlink(preq1, drop_list);
plo->st.coal_mback++;
}
@@ -449,7 +444,7 @@ insert_entry_tree(struct ploop_device * plo, struct ploop_request * preq0,
if (test_bit(PLOOP_REQ_SYNC, &preq0->state))
preq_set_sync_bit(clash);
merge_rw_flags_to_req(preq0->req_rw, clash);
- preq_unlink(preq0, drop_list, WHO_INSERT_ENTRY_TREE1);
+ preq_unlink(preq0, drop_list);
plo->st.coal_forw2++;
n = rb_next(&clash->lockout_link);
@@ -474,7 +469,7 @@ insert_entry_tree(struct ploop_device * plo, struct ploop_request * preq0,
if (test_bit(PLOOP_REQ_SYNC, &preq0->state))
preq_set_sync_bit(clash);
merge_rw_flags_to_req(preq0->req_rw, clash);
- preq_unlink(preq0, drop_list, WHO_INSERT_ENTRY_TREE2);
+ preq_unlink(preq0, drop_list);
n = rb_prev(&clash->lockout_link);
if (n) {
@@ -502,7 +497,6 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
BUG_ON(list_empty(&plo->free_list));
BUG_ON(plo->free_qlen <= 0);
preq = list_entry(plo->free_list.next, struct ploop_request, list);
- preq_dbg_release(preq, OWNER_FREE_LIST);
list_del_init(&preq->list);
plo->free_qlen--;
@@ -546,7 +540,6 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
clear_bit(BIO_BDEV_REUSED, &bio->bi_flags);
}
BIO_ENDIO(plo->queue, bio, err);
- preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_BIO_QUEUE);
list_add(&preq->list, &plo->free_list);
plo->free_qlen++;
plo->bio_discard_qlen--;
@@ -581,7 +574,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
plo->bio_discard_qlen--;
else
plo->bio_qlen--;
- ploop_entry_add(plo, preq, WHO_PLOOP_BIO_QUEUE);
+ ploop_entry_add(plo, preq);
if (bio->bi_size && !(bio->bi_rw & REQ_DISCARD))
insert_entry_tree(plo, preq, drop_list);
@@ -598,8 +591,6 @@ ploop_get_request(struct ploop_device * plo, struct list_head * list)
return NULL;
preq = list_first_entry(list, struct ploop_request, list);
- preq_dbg_release(preq, (list == &plo->ready_queue) ?
- OWNER_READY_QUEUE : OWNER_ENTRY_QUEUE);
list_del_init(&preq->list);
return preq;
}
@@ -1181,8 +1172,6 @@ static int __check_lockout(struct ploop_request *preq, bool pb)
else if (preq->req_cluster > p->req_cluster)
n = n->rb_right;
else {
- preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST,
- pb ? WHO_CHECK_LOCKOUT_PB : WHO_CHECK_LOCKOUT);
list_add_tail(&preq->list, &p->delay_list);
plo->st.bio_lockouts++;
trace_preq_lockout(preq, p);
@@ -1366,18 +1355,12 @@ static void ploop_complete_request(struct ploop_request * preq)
preq->req_cluster = ~0U;
- if (!list_empty(&preq->delay_list)) {
- struct ploop_request *pr;
- list_for_each_entry(pr, &preq->delay_list, list) {
- preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_REQ_MERGE);
- }
+ if (!list_empty(&preq->delay_list))
list_splice_init(&preq->delay_list, plo->ready_queue.prev);
- }
plo->active_reqs--;
preq->eng_state = PLOOP_E_ENTRY;
- ploop_entry_add(plo, preq, WHO_PLOOP_COMPLETE_REQUEST1);
+ ploop_entry_add(plo, preq);
spin_unlock_irq(&plo->lock);
return;
}
@@ -1410,14 +1393,8 @@ static void ploop_complete_request(struct ploop_request * preq)
del_pb_lockout(preq); /* preq may die via ploop_fail_immediate() */
ploop_test_and_clear_blockable(plo, preq);
- if (!list_empty(&preq->delay_list)) {
- struct ploop_request *pr;
- list_for_each_entry(pr, &preq->delay_list, list) {
- preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_REQUEST2);
- }
+ if (!list_empty(&preq->delay_list))
list_splice_init(&preq->delay_list, plo->ready_queue.prev);
- }
if (preq->map) {
map_release(preq->map);
@@ -1437,7 +1414,6 @@ static void ploop_complete_request(struct ploop_request * preq)
ploop_fb_put_zero_request(plo->fbd, preq);
} else {
ploop_uncongest(plo);
- preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_COMPLETE_REQUEST2);
list_add(&preq->list, &plo->free_list);
plo->free_qlen++;
if (waitqueue_active(&plo->req_waitq))
@@ -1476,13 +1452,11 @@ void ploop_fail_request(struct ploop_request * preq, int err)
spin_lock_irq(&plo->lock);
if (err == -ENOSPC) {
set_bit(PLOOP_S_ENOSPC_EVENT, &plo->state);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_FAIL_REQUEST_ENOSPC);
list_add(&preq->list, &plo->ready_queue);
if (waitqueue_active(&plo->event_waitq))
wake_up_interruptible(&plo->event_waitq);
} else {
set_bit(PLOOP_S_ABORT, &plo->state);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_FAIL_REQUEST);
list_add_tail(&preq->list, &plo->ready_queue);
}
spin_unlock_irq(&plo->lock);
@@ -1516,7 +1490,6 @@ void ploop_complete_io_state(struct ploop_request * preq)
if (preq->error)
set_bit(PLOOP_S_ABORT, &plo->state);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_IO_STATE);
list_add_tail(&preq->list, &plo->ready_queue);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
waitqueue_active(&plo->waitq))
@@ -1697,10 +1670,8 @@ void ploop_queue_zero_request(struct ploop_device *plo,
}
orig_preq->iblock = 0;
INIT_LIST_HEAD(&preq->delay_list);
- preq_dbg_acquire(orig_preq, OWNER_PREQ_DELAY_LIST, WHO_PLOOP_QUEUE_ZERO_REQUEST1);
list_add_tail(&orig_preq->list, &preq->delay_list);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_QUEUE_ZERO_REQUEST2);
list_add(&preq->list, &plo->ready_queue);
plo->active_reqs++;
@@ -2025,15 +1996,6 @@ ploop_entry_nullify_req(struct ploop_request *preq)
sbl.head = sbl.tail = preq->aux_bio;
preq->eng_state = PLOOP_E_RELOC_NULLIFY;
-
- /* We can replace if & list_del_init with BUG_ON:
- the caller always does list_del_init before calling us */
- if (preq->list.next != &preq->list ||
- preq->list.prev != &preq->list) {
- printk("ploop_entry_nullify_req(%p): unexpected preq->list: %p %p\n",
- preq, preq->list.next, preq->list.prev);
- dump_stack();
- }
list_del_init(&preq->list);
/*
@@ -2158,7 +2120,6 @@ void ploop_add_req_to_fsync_queue(struct ploop_request * preq)
struct ploop_io * top_io = &top_delta->io;
spin_lock_irq(&plo->lock);
- preq_dbg_acquire(preq, OWNER_DIO_FSYNC_QUEUE, WHO_PLOOP_ADD_REQ_TO_FSYNC_QUEUE);
list_add_tail(&preq->list, &top_io->fsync_queue);
top_io->fsync_qlen++;
if (waitqueue_active(&top_io->fsync_waitq))
@@ -2287,14 +2248,8 @@ ploop_entry_request(struct ploop_request * preq)
del_pb_lockout(preq);
spin_lock_irq(&plo->lock);
- if (!list_empty(&preq->delay_list)) {
- struct ploop_request *pr;
- list_for_each_entry(pr, &preq->delay_list, list) {
- preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_ENTRY_REQUEST_PB_OUT);
- }
+ if (!list_empty(&preq->delay_list))
list_splice_init(&preq->delay_list, plo->ready_queue.prev);
- }
spin_unlock_irq(&plo->lock);
}
@@ -2609,10 +2564,8 @@ static void ploop_req_state_process(struct ploop_request * preq)
spin_lock_irq(&plo->lock);
if (!list_empty(&preq->delay_list)) {
struct ploop_request *pr;
- list_for_each_entry(pr, &preq->delay_list, list) {
- preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_E_RELOC_COMPLETE);
- }
+ pr = list_entry(preq->delay_list.next,
+ struct ploop_request, list);
list_splice_init(&preq->delay_list,
plo->ready_queue.prev);
}
@@ -2964,14 +2917,8 @@ static void ploop_handle_enospc_req(struct ploop_request *preq)
del_lockout(preq);
- if (!list_empty(&preq->delay_list)) {
- struct ploop_request *pr;
- list_for_each_entry(pr, &preq->delay_list, list) {
- preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_HANDLE_ENOSPC_REQ);
- }
+ if (!list_empty(&preq->delay_list))
list_splice_init(&preq->delay_list, plo->ready_queue.prev);
- }
if (preq->map) {
map_release(preq->map);
@@ -3059,7 +3006,6 @@ static int ploop_thread(void * data)
if (test_bit(PLOOP_REQ_BARRIER, &preq->state)) {
set_bit(PLOOP_S_ATTENTION, &plo->state);
if (plo->active_reqs) {
- preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, WHO_PLOOP_THREAD1);
list_add(&preq->list, &plo->entry_queue);
spin_unlock_irq(&plo->lock);
continue;
@@ -3071,7 +3017,6 @@ static int ploop_thread(void * data)
plo->active_reqs > plo->entry_qlen &&
time_before(jiffies, preq->tstamp + plo->tune.batch_entry_delay) &&
!kthread_should_stop()) {
- preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, WHO_PLOOP_THREAD2);
list_add(&preq->list, &plo->entry_queue);
once = 1;
mod_timer(&plo->mitigation_timer, preq->tstamp + plo->tune.batch_entry_delay);
@@ -3392,7 +3337,7 @@ void ploop_quiesce(struct ploop_device * plo)
init_completion(&plo->relaxed_comp);
plo->quiesce_comp = &qcomp;
- ploop_entry_add(plo, preq, WHO_PLOOP_QUIESCE);
+ ploop_entry_add(plo, preq);
plo->barrier_reqs++;
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -3686,7 +3631,7 @@ static void ploop_merge_process(struct ploop_device * plo)
atomic_inc(&plo->maintenance_cnt);
- ploop_entry_add(plo, preq, WHO_PLOOP_MERGE_PROCESS);
+ ploop_entry_add(plo, preq);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
wake_up_interruptible(&plo->waitq);
@@ -3979,8 +3924,6 @@ static int ploop_start(struct ploop_device * plo, struct block_device *bdev)
preq->plo = plo;
INIT_LIST_HEAD(&preq->delay_list);
- atomic_set(&preq->dbg_state,
- PREQ_DBG_STATE(OWNER_FREE_LIST, WHO_PLOOP_START));
list_add(&preq->list, &plo->free_list);
plo->free_qlen++;
plo->free_qmax++;
@@ -4139,8 +4082,7 @@ static int ploop_stop(struct ploop_device * plo, struct block_device *bdev)
struct ploop_request * preq;
preq = list_first_entry(&plo->free_list, struct ploop_request, list);
- preq_dbg_release(preq, OWNER_FREE_LIST);
- list_del_init(&preq->list);
+ list_del(&preq->list);
plo->free_qlen--;
plo->free_qmax--;
kfree(preq);
@@ -4316,7 +4258,7 @@ static void ploop_relocate(struct ploop_device * plo, int grow_stage)
atomic_inc(&plo->maintenance_cnt);
- ploop_entry_add(plo, preq, WHO_PLOOP_RELOCATE);
+ ploop_entry_add(plo, preq);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
wake_up_interruptible(&plo->waitq);
@@ -4628,7 +4570,7 @@ static void ploop_relocblks_process(struct ploop_device *plo)
atomic_inc(&plo->maintenance_cnt);
- ploop_entry_add(plo, preq, WHO_PLOOP_RELOCBLKS_PROCESS);
+ ploop_entry_add(plo, preq);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
wake_up_interruptible(&plo->waitq);
diff --git a/drivers/block/ploop/discard.c b/drivers/block/ploop/discard.c
index 5ee8cf8..3312249 100644
--- a/drivers/block/ploop/discard.c
+++ b/drivers/block/ploop/discard.c
@@ -60,8 +60,6 @@ int ploop_discard_fini_ioc(struct ploop_device *plo)
spin_lock_irq(&plo->lock);
list_for_each_entry_safe(preq, tmp, &plo->entry_queue, list)
if (test_bit(PLOOP_REQ_DISCARD, &preq->state)) {
- preq_dbg_release(preq, OWNER_ENTRY_QUEUE);
- preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_DISCARD_FINI_IOC);
list_move(&preq->list, &drop_list);
ploop_entry_qlen_dec(preq);
}
diff --git a/drivers/block/ploop/freeblks.c b/drivers/block/ploop/freeblks.c
index bc045a4..a74a22d 100644
--- a/drivers/block/ploop/freeblks.c
+++ b/drivers/block/ploop/freeblks.c
@@ -200,7 +200,6 @@ int ploop_fb_check_reloc_req(struct ploop_freeblks_desc *fbd,
else {
spin_lock_irq(&fbd->plo->lock);
preq->eng_state = pin_state;
- preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST, WHO_PLOOP_FB_CHECK_RELOC_REQ);
list_add_tail(&preq->list, &p->delay_list);
spin_unlock_irq(&fbd->plo->lock);
return 1;
@@ -278,15 +277,13 @@ ploop_fb_get_zero_request(struct ploop_freeblks_desc *fbd)
preq = list_entry(fbd->free_zero_list.next,
struct ploop_request, list);
- preq_dbg_release(preq, OWNER_FBD_FREE_ZERO_LIST);
- list_del_init(&preq->list);
+ list_del(&preq->list);
return preq;
}
void ploop_fb_put_zero_request(struct ploop_freeblks_desc *fbd,
struct ploop_request *preq)
{
- preq_dbg_acquire(preq, OWNER_FBD_FREE_ZERO_LIST, WHO_PLOOP_FB_PUT_ZERO_REQ);
list_add(&preq->list, &fbd->free_zero_list);
}
@@ -793,8 +790,6 @@ struct ploop_freeblks_desc *ploop_fb_init(struct ploop_device *plo)
preq->plo = plo;
INIT_LIST_HEAD(&preq->delay_list);
- atomic_set(&preq->dbg_state,
- PREQ_DBG_STATE(OWNER_FBD_FREE_ZERO_LIST, WHO_PLOOP_FB_INIT));
list_add(&preq->list, &fbd->free_zero_list);
}
@@ -843,8 +838,7 @@ void ploop_fb_fini(struct ploop_freeblks_desc *fbd, int err)
preq = list_first_entry(&fbd->free_zero_list,
struct ploop_request,
list);
- preq_dbg_release(preq, OWNER_FBD_FREE_ZERO_LIST);
- list_del_init(&preq->list);
+ list_del(&preq->list);
kfree(preq);
}
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index 8077ec9..fb594c8 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -474,7 +474,6 @@ cached_submit(struct ploop_io *io, iblock_t iblk, struct ploop_request * preq,
spin_lock_irq(&plo->lock);
ploop_acc_flush_skip_locked(plo, preq->req_rw);
preq->iblock = iblk;
- preq_dbg_acquire(preq, OWNER_DIO_FSYNC_QUEUE, WHO_CACHED_SUBMIT);
list_add_tail(&preq->list, &io->fsync_queue);
io->fsync_qlen++;
plo->st.bio_syncwait++;
@@ -834,13 +833,11 @@ static int dio_fsync_thread(void * data)
while (!list_empty(&list)) {
struct ploop_request * preq;
preq = list_entry(list.next, struct ploop_request, list);
- preq_dbg_release(preq, OWNER_DIO_FSYNC_QUEUE);
- list_del_init(&preq->list);
+ list_del(&preq->list);
if (err)
PLOOP_REQ_SET_ERROR(preq, err);
__set_bit(PLOOP_REQ_FSYNC_DONE, &preq->state);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_DIO_FSYNC_THREAD);
list_add_tail(&preq->list, &plo->ready_queue);
io->fsync_qlen--;
}
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index f8ef504..ee9ba26 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -43,13 +43,10 @@ static void __kaio_queue_fsync_req(struct ploop_request * preq, int prio)
struct ploop_delta * delta = ploop_top_delta(plo);
struct ploop_io * io = &delta->io;
- if (prio) {
- preq_dbg_acquire(preq, OWNER_KAIO_FSYNC_QUEUE, WHO_KAIO_QUEUE_TRUNC_REQ);
+ if (prio)
list_add(&preq->list, &io->fsync_queue);
- } else {
- preq_dbg_acquire(preq, OWNER_KAIO_FSYNC_QUEUE, WHO_KAIO_QUEUE_FSYNC_REQ);
+ else
list_add_tail(&preq->list, &io->fsync_queue);
- }
io->fsync_qlen++;
if (waitqueue_active(&io->fsync_waitq))
@@ -403,8 +400,7 @@ static int kaio_fsync_thread(void * data)
break;
preq = list_entry(io->fsync_queue.next, struct ploop_request, list);
- preq_dbg_release(preq, OWNER_KAIO_FSYNC_QUEUE);
- list_del_init(&preq->list);
+ list_del(&preq->list);
io->fsync_qlen--;
if (!preq->prealloc_size)
plo->st.bio_fsync++;
@@ -437,7 +433,6 @@ static int kaio_fsync_thread(void * data)
}
spin_lock_irq(&plo->lock);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_KAIO_FSYNC_THREAD);
list_add_tail(&preq->list, &plo->ready_queue);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -488,7 +483,6 @@ kaio_submit_alloc(struct ploop_io *io, struct ploop_request * preq,
spin_unlock_irq(&io->plo->lock);
return;
} else { /* we're not first */
- preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST, WHO_KAIO_SUBMIT_ALLOC);
list_add_tail(&preq->list,
&io->prealloc_preq->delay_list);
return;
@@ -971,10 +965,9 @@ static void kaio_issue_flush(struct ploop_io * io, struct ploop_request *preq)
spin_lock_irq(&io->plo->lock);
- if (delta->flags & PLOOP_FMT_RDONLY) {
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_KAIO_ISSUE_FLUSH);
+ if (delta->flags & PLOOP_FMT_RDONLY)
list_add_tail(&preq->list, &io->plo->ready_queue);
- } else
+ else
kaio_queue_fsync_req(preq);
spin_unlock_irq(&io->plo->lock);
diff --git a/drivers/block/ploop/map.c b/drivers/block/ploop/map.c
index b6f2243..e579133 100644
--- a/drivers/block/ploop/map.c
+++ b/drivers/block/ploop/map.c
@@ -509,7 +509,6 @@ int map_index_fault(struct ploop_request * preq)
if (test_and_set_bit(PLOOP_MAP_READ, &m->state)) {
__TRACE("r %p %u %p\n", preq, preq->req_cluster, m);
- preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_MAP_INDEX_FAULT);
list_add_tail(&preq->list, &m->io_queue);
plo->st.merge_lockouts++;
spin_unlock_irq(&plo->lock);
@@ -560,9 +559,7 @@ static void map_read_endio(struct ploop_request * preq, struct map_node * m)
list_for_each_safe(n, pn, &m->io_queue) {
preq = list_entry(n, struct ploop_request, list);
if (preq->eng_state == PLOOP_E_ENTRY) {
- preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
- list_del_init(&preq->list);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_READ_ENDIO);
+ list_del(&preq->list);
list_add_tail(&preq->list, &list);
}
}
@@ -621,9 +618,7 @@ static void map_merge_endio(struct ploop_request * preq, struct map_node * m)
list_for_each_safe(n, pn, &m->io_queue) {
preq = list_entry(n, struct ploop_request, list);
if (preq->eng_state == PLOOP_E_ENTRY) {
- preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
- list_del_init(&preq->list);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_MERGE_ENDIO);
+ list_del(&preq->list);
list_add_tail(&preq->list, &list);
}
}
@@ -722,7 +717,6 @@ static int ploop_read_map(struct ploop_map * map, struct ploop_request * preq)
} else {
__TRACE("g %p %u %p\n", preq, preq->req_cluster, m);
plo->st.map_lockouts++;
- preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_READ_MAP);
list_add_tail(&preq->list, &m->io_queue);
err = 1;
}
@@ -982,7 +976,6 @@ void ploop_index_update(struct ploop_request * preq)
if (test_and_set_bit(PLOOP_MAP_WRITEBACK, &m->state)) {
preq->eng_state = PLOOP_E_INDEX_DELAY;
- preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_UPDATE1);
list_add_tail(&preq->list, &m->io_queue);
__TRACE("d %p %u %p\n", preq, preq->req_cluster, m);
return;
@@ -1026,7 +1019,6 @@ void ploop_index_update(struct ploop_request * preq)
out:
preq->eng_state = PLOOP_E_COMPLETE;
spin_lock_irq(&plo->lock);
- preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_UPDATE2);
list_add_tail(&preq->list, &plo->ready_queue);
spin_unlock_irq(&plo->lock);
return;
@@ -1060,13 +1052,11 @@ static void map_idx_swap(struct map_node *m, unsigned int idx,
}
static inline void requeue_req(struct ploop_request *preq,
- unsigned long new_eng_state, unsigned who)
+ unsigned long new_eng_state)
{
preq->eng_state = new_eng_state;
spin_lock_irq(&preq->plo->lock);
- preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
- list_del_init(&preq->list);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, who);
+ list_del(&preq->list);
list_add_tail(&preq->list, &preq->plo->ready_queue);
spin_unlock_irq(&preq->plo->lock);
}
@@ -1087,7 +1077,7 @@ static void map_wb_complete_post_process(struct ploop_map *map,
(!test_bit(PLOOP_REQ_RELOC_A, &preq->state) &&
!test_bit(PLOOP_REQ_RELOC_S, &preq->state)))) {
- requeue_req(preq, PLOOP_E_COMPLETE, WHO_MAP_WB_COMPL_PP1);
+ requeue_req(preq, PLOOP_E_COMPLETE);
return;
}
@@ -1098,7 +1088,7 @@ static void map_wb_complete_post_process(struct ploop_map *map,
preq->map = NULL;
spin_unlock_irq(&plo->lock);
- requeue_req(preq, PLOOP_E_RELOC_COMPLETE, WHO_MAP_WB_COMPL_PP2);
+ requeue_req(preq, PLOOP_E_RELOC_COMPLETE);
return;
}
@@ -1106,13 +1096,13 @@ static void map_wb_complete_post_process(struct ploop_map *map,
BUG_ON (!preq->aux_bio);
if (++plo->grow_relocated > plo->grow_end - plo->grow_start) {
- requeue_req(preq, PLOOP_E_COMPLETE, WHO_MAP_WB_COMPL_PP3);
+ requeue_req(preq, PLOOP_E_COMPLETE);
return;
}
del_lockout(preq);
preq->req_cluster++;
- requeue_req(preq, PLOOP_E_ENTRY, WHO_MAP_WB_COMPL_PP4);
+ requeue_req(preq, PLOOP_E_ENTRY);
}
static void map_wb_complete(struct map_node * m, int err)
@@ -1175,9 +1165,7 @@ static void map_wb_complete(struct map_node * m, int err)
PLOOP_REQ_SET_ERROR(preq, err);
preq->eng_state = PLOOP_E_COMPLETE;
spin_lock_irq(&plo->lock);
- preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
- list_del_init(cursor);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_WB_COMPLETE1);
+ list_del(cursor);
list_add_tail(cursor, &preq->plo->ready_queue);
spin_unlock_irq(&plo->lock);
} else {
@@ -1211,9 +1199,7 @@ static void map_wb_complete(struct map_node * m, int err)
PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
preq->eng_state = PLOOP_E_COMPLETE;
spin_lock_irq(&plo->lock);
- preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
- list_del_init(cursor);
- preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_WB_COMPLETE2);
+ list_del(cursor);
list_add_tail(cursor, &plo->ready_queue);
spin_unlock_irq(&plo->lock);
break;
@@ -1241,7 +1227,6 @@ static void map_wb_complete(struct map_node * m, int err)
if (!main_preq) {
main_preq = preq;
- preq_dbg_release(main_preq, OWNER_MAP_NODE_IO_QUEUE);
list_del_init(&main_preq->list);
}
plo->st.map_multi_updates++;
@@ -1268,7 +1253,6 @@ ploop_index_wb_complete(struct ploop_request * preq)
struct map_node * m = preq->map;
spin_lock_irq(&plo->lock);
- preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_WB_COMPLETE);
list_add_tail(&preq->list, &m->io_queue);
spin_unlock_irq(&plo->lock);
diff --git a/drivers/block/ploop/push_backup.c b/drivers/block/ploop/push_backup.c
index d92b93c..1f00e24 100644
--- a/drivers/block/ploop/push_backup.c
+++ b/drivers/block/ploop/push_backup.c
@@ -465,7 +465,7 @@ int ploop_pb_copy_cbt_to_user(struct ploop_pushbackup_desc *pbd, char *user_addr
}
static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
- struct pb_set *pbs, unsigned new_owner)
+ struct pb_set *pbs)
{
struct rb_root *tree = &pbs->tree;
struct rb_node ** p = &tree->rb_node;
@@ -496,7 +496,6 @@ static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
&pbs->list, pbs->list.prev->next, pbs->list.prev, preq);
BUG();
}
- preq_dbg_acquire(preq, new_owner, WHO_PLOOP_PB_ADD_REQ_TO_TREE);
list_add_tail(&preq->list, &pbs->list);
rb_link_node(&preq->reloc_link, parent, p);
@@ -506,17 +505,17 @@ static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
static void ploop_pb_add_req_to_pending(struct ploop_pushbackup_desc *pbd,
struct ploop_request *preq)
{
- ploop_pb_add_req_to_tree(preq, &pbd->pending_set, OWNER_PB_PENDING_SET);
+ ploop_pb_add_req_to_tree(preq, &pbd->pending_set);
}
static void ploop_pb_add_req_to_reported(struct ploop_pushbackup_desc *pbd,
struct ploop_request *preq)
{
- ploop_pb_add_req_to_tree(preq, &pbd->reported_set, OWNER_PB_REPORTED_SET);
+ ploop_pb_add_req_to_tree(preq, &pbd->reported_set);
}
static void remove_req_from_pbs(struct pb_set *pbs,
- struct ploop_request *preq, unsigned old_owner)
+ struct ploop_request *preq)
{
unsigned long timeout = preq->plo->tune.push_backup_timeout * HZ;
bool oldest_deleted = false;
@@ -525,7 +524,6 @@ static void remove_req_from_pbs(struct pb_set *pbs,
oldest_deleted = true;
rb_erase(&preq->reloc_link, &pbs->tree);
- preq_dbg_release(preq, old_owner);
list_del_init(&preq->list);
if (timeout && oldest_deleted && !list_empty(&pbs->list) &&
@@ -548,8 +546,7 @@ static inline bool preq_match(struct ploop_request *preq, cluster_t clu,
/* returns leftmost preq which req_cluster >= clu */
static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
cluster_t clu, cluster_t len,
- struct ploop_request **npreq,
- unsigned old_owner)
+ struct ploop_request **npreq)
{
struct rb_root *tree = &pbs->tree;
struct rb_node *n = tree->rb_node;
@@ -569,7 +566,7 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
if (n)
*npreq = rb_entry(n, struct ploop_request,
reloc_link);
- remove_req_from_pbs(pbs, p, old_owner);
+ remove_req_from_pbs(pbs, p);
return p;
}
}
@@ -585,7 +582,7 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
n = rb_next(&p->reloc_link);
if (n)
*npreq = rb_entry(n, struct ploop_request, reloc_link);
- remove_req_from_pbs(pbs, p, old_owner);
+ remove_req_from_pbs(pbs, p);
return p;
}
@@ -594,8 +591,7 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
static struct ploop_request *
ploop_pb_get_first_req_from_tree(struct pb_set *pbs,
- struct ploop_request **npreq,
- unsigned old_owner)
+ struct ploop_request **npreq)
{
struct rb_root *tree = &pbs->tree;
struct ploop_request *p;
@@ -614,30 +610,27 @@ ploop_pb_get_first_req_from_tree(struct pb_set *pbs,
}
p = rb_entry(n, struct ploop_request, reloc_link);
- remove_req_from_pbs(pbs, p, old_owner);
+ remove_req_from_pbs(pbs, p);
return p;
}
static struct ploop_request *
ploop_pb_get_first_req_from_pending(struct ploop_pushbackup_desc *pbd)
{
- return ploop_pb_get_first_req_from_tree(&pbd->pending_set, NULL,
- OWNER_PB_PENDING_SET);
+ return ploop_pb_get_first_req_from_tree(&pbd->pending_set, NULL);
}
static struct ploop_request *
ploop_pb_get_first_reqs_from_pending(struct ploop_pushbackup_desc *pbd,
struct ploop_request **npreq)
{
- return ploop_pb_get_first_req_from_tree(&pbd->pending_set, npreq,
- OWNER_PB_PENDING_SET);
+ return ploop_pb_get_first_req_from_tree(&pbd->pending_set, npreq);
}
static struct ploop_request *
ploop_pb_get_first_req_from_reported(struct ploop_pushbackup_desc *pbd)
{
- return ploop_pb_get_first_req_from_tree(&pbd->reported_set, NULL,
- OWNER_PB_REPORTED_SET);
+ return ploop_pb_get_first_req_from_tree(&pbd->reported_set, NULL);
}
int ploop_pb_preq_add_pending(struct ploop_pushbackup_desc *pbd,
@@ -738,7 +731,6 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
while (!RB_EMPTY_ROOT(&pbd->pending_set.tree)) {
struct ploop_request *preq =
ploop_pb_get_first_req_from_pending(pbd);
- preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_PB_STOP1);
list_add(&preq->list, &drop_list);
ret++;
}
@@ -746,7 +738,6 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
while (!RB_EMPTY_ROOT(&pbd->reported_set.tree)) {
struct ploop_request *preq =
ploop_pb_get_first_req_from_reported(pbd);
- preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_PB_STOP2);
list_add(&preq->list, &drop_list);
ret++;
}
@@ -757,14 +748,9 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
if (!list_empty(&drop_list) || !ploop_pb_bio_list_empty(pbd)) {
struct ploop_device *plo = pbd->plo;
- struct ploop_request *pr;
BUG_ON(!plo);
spin_lock_irq(&plo->lock);
- list_for_each_entry(pr, &drop_list, list) {
- preq_dbg_release(pr, OWNER_TEMP_DROP_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_PB_STOP3);
- }
list_splice_init(&drop_list, plo->ready_queue.prev);
return_bios_back_to_plo(plo, &pbd->bio_pending_list);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -848,7 +834,7 @@ int ploop_pb_get_pending(struct ploop_pushbackup_desc *pbd,
else
npreq = NULL;
- remove_req_from_pbs(&pbd->pending_set, preq, OWNER_PB_PENDING_SET);
+ remove_req_from_pbs(&pbd->pending_set, preq);
ploop_pb_add_req_to_reported(pbd, preq);
(*len_p)++;
@@ -941,15 +927,13 @@ static void ploop_pb_process_extent(struct pb_set *pbs, cluster_t clu,
int *n_found)
{
struct ploop_request *preq, *npreq;
- unsigned old_owner = n_found ? OWNER_PB_REPORTED_SET : OWNER_PB_PENDING_SET;
- preq = ploop_pb_get_req_from_tree(pbs, clu, len, &npreq, old_owner);
+ preq = ploop_pb_get_req_from_tree(pbs, clu, len, &npreq);
while (preq) {
struct rb_node *n;
set_bit(PLOOP_REQ_PUSH_BACKUP, &preq->ppb_state);
- preq_dbg_acquire(preq, OWNER_TEMP_READY_LIST, WHO_PLOOP_PB_PROCESS_EXTENT);
list_add(&preq->list, ready_list);
if (n_found)
@@ -964,7 +948,7 @@ static void ploop_pb_process_extent(struct pb_set *pbs, cluster_t clu,
npreq = rb_entry(n, struct ploop_request, reloc_link);
else
npreq = NULL;
- remove_req_from_pbs(pbs, preq, old_owner);
+ remove_req_from_pbs(pbs, preq);
}
}
@@ -993,13 +977,8 @@ void ploop_pb_put_reported(struct ploop_pushbackup_desc *pbd,
if (!list_empty(&ready_list)) {
struct ploop_device *plo = pbd->plo;
- struct ploop_request *pr;
spin_lock_irq(&plo->lock);
- list_for_each_entry(pr, &ready_list, list) {
- preq_dbg_release(pr, OWNER_TEMP_READY_LIST);
- preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_PB_PUT_REPORTED);
- }
list_splice(&ready_list, plo->ready_queue.prev);
if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
wake_up_interruptible(&plo->waitq);
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index ddc9619..f13b7ca 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -561,7 +561,6 @@ struct ploop_request
unsigned long state;
unsigned long eng_state;
- atomic_t dbg_state;
int error;
struct map_node *map;
@@ -620,112 +619,6 @@ struct ploop_request
struct ploop_io *eng_io;
};
-/* ploop_request->dbg_state types & operations */
-enum {
- WHO_PLOOP_BIO_QUEUE,
- WHO_PLOOP_QUIESCE,
- WHO_PLOOP_MERGE_PROCESS,
- WHO_PLOOP_RELOCATE,
- WHO_PLOOP_RELOCBLKS_PROCESS,
- WHO_CACHED_SUBMIT,
- WHO_DIO_FSYNC_THREAD,
- WHO_KAIO_QUEUE_TRUNC_REQ,
- WHO_KAIO_QUEUE_FSYNC_REQ,
- WHO_KAIO_FSYNC_THREAD,
- WHO_KAIO_SUBMIT_ALLOC,
- WHO_KAIO_ISSUE_FLUSH,
- WHO_OVERLAP_FORWARD,
- WHO_OVERLAP_BACKWARD,
- WHO_INSERT_ENTRY_TREE1,
- WHO_INSERT_ENTRY_TREE2,
- WHO_PLOOP_PREQ_DROP,
- WHO_PLOOP_DISCARD_FINI_IOC,
- WHO_CHECK_LOCKOUT_PB,
- WHO_CHECK_LOCKOUT,
- WHO_PLOOP_COMPLETE_REQUEST1,
- WHO_PLOOP_COMPLETE_REQUEST2,
- WHO_PLOOP_FAIL_REQUEST_ENOSPC,
- WHO_PLOOP_FAIL_REQUEST,
- WHO_PLOOP_COMPLETE_IO_STATE,
- WHO_PLOOP_QUEUE_ZERO_REQUEST1,
- WHO_PLOOP_QUEUE_ZERO_REQUEST2,
- WHO_PLOOP_ADD_REQ_TO_FSYNC_QUEUE,
- WHO_PLOOP_E_RELOC_COMPLETE,
- WHO_PLOOP_COMPLETE_REQ_MERGE,
- WHO_PLOOP_ENTRY_REQUEST_PB_OUT,
- WHO_PLOOP_HANDLE_ENOSPC_REQ,
- WHO_PLOOP_PB_STOP1,
- WHO_PLOOP_PB_STOP2,
- WHO_PLOOP_PB_STOP3,
- WHO_PLOOP_THREAD1,
- WHO_PLOOP_THREAD2,
- WHO_PLOOP_START,
- WHO_MAP_INDEX_FAULT,
- WHO_MAP_READ_ENDIO,
- WHO_PLOOP_PB_PUT_REPORTED,
- WHO_PLOOP_PB_PROCESS_EXTENT,
- WHO_MAP_MERGE_ENDIO,
- WHO_PLOOP_READ_MAP,
- WHO_PLOOP_INDEX_UPDATE1,
- WHO_PLOOP_INDEX_UPDATE2,
- WHO_MAP_WB_COMPL_PP1,
- WHO_MAP_WB_COMPL_PP2,
- WHO_MAP_WB_COMPL_PP3,
- WHO_MAP_WB_COMPL_PP4,
- WHO_MAP_WB_COMPLETE1,
- WHO_MAP_WB_COMPLETE2,
- WHO_PLOOP_INDEX_WB_COMPLETE,
- WHO_PLOOP_PB_ADD_REQ_TO_TREE,
- WHO_PLOOP_FB_CHECK_RELOC_REQ,
- WHO_PLOOP_FB_PUT_ZERO_REQ,
- WHO_PLOOP_FB_INIT,
-};
-
-enum { /* owner */
- OWNER_FREE_LIST,
- OWNER_ENTRY_QUEUE,
- OWNER_READY_QUEUE,
- OWNER_DIO_FSYNC_QUEUE,
- OWNER_KAIO_FSYNC_QUEUE,
- OWNER_PREQ_DELAY_LIST,
- OWNER_TEMP_DROP_LIST,
- OWNER_MAP_NODE_IO_QUEUE,
- OWNER_TEMP_READY_LIST,
- OWNER_PB_PENDING_SET,
- OWNER_PB_REPORTED_SET,
- OWNER_FBD_FREE_ZERO_LIST,
-};
-
-#define PREQ_DBG_STATE(owner, who) (((owner) << 16) | (who))
-#define PREQ_DBG_OWNER(state) ((state) >> 16)
-#define PREQ_DBG_WHO(state) ((state) & 0xffff)
-
-static inline void preq_dbg_acquire(struct ploop_request *preq,
- unsigned new_owner, unsigned new_who)
-{
- unsigned int new_state = PREQ_DBG_STATE(new_owner, new_who);
- unsigned int old_state = atomic_xchg(&preq->dbg_state, new_state);
- if (old_state) {
- printk("preq_dbg_acquire(%p): "
- "new_owner=%d new_who=%d old_owner=%d "
- "old_who=%d\n", preq, new_owner, new_who,
- PREQ_DBG_OWNER(old_state), PREQ_DBG_WHO(old_state));
- dump_stack();
- }
-}
-
-static inline void preq_dbg_release(struct ploop_request *preq, unsigned owner)
-{
- unsigned int old_state = atomic_xchg(&preq->dbg_state, 0);
- if (owner != PREQ_DBG_OWNER(old_state)) {
- printk("preq_dbg_release(%p): "
- "expected owner=%d, but old_owner=%d "
- "old_who=%d\n", preq, owner,
- PREQ_DBG_OWNER(old_state), PREQ_DBG_WHO(old_state));
- dump_stack();
- }
-}
-
static inline struct ploop_delta * ploop_top_delta(struct ploop_device * plo)
{
return list_empty(&plo->map.delta_list) ? NULL :
@@ -908,9 +801,8 @@ static inline void ploop_acc_flush_skip_locked(struct ploop_device *plo,
plo->st.bio_flush_skip++;
}
-static inline void ploop_entry_add(struct ploop_device * plo, struct ploop_request * preq, unsigned who)
+static inline void ploop_entry_add(struct ploop_device * plo, struct ploop_request * preq)
{
- preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, who);
list_add_tail(&preq->list, &plo->entry_queue);
plo->entry_qlen++;
if (test_bit(PLOOP_REQ_SYNC, &preq->state) && (!(preq->req_rw & WRITE) || (preq->req_rw & (REQ_FLUSH|REQ_FUA)))) {