[Devel] [PATCH RHEL7 COMMIT] ploop: debug preq->list

Konstantin Khorenko khorenko at virtuozzo.com
Wed May 31 06:04:11 PDT 2017


The commit is pushed to "branch-rh7-3.10.0-514.16.1.vz7.32.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-514.16.1.vz7.32.5
------>
commit 2d58a9112f9e37a93485fdef25cf9fc05248a9c0
Author: Maxim Patlasov <mpatlasov at virtuozzo.com>
Date:   Wed May 31 17:04:11 2017 +0400

    ploop: debug preq->list
    
    The patch doesn't change any logic; it only adds debug accounting to
    each place where ploop manipulates preq->list.
    
    preq->list is "struct list_head" linking ploop request to some list. There
    are a dozen of different lists where preq may be linked in. One of them is
    plo->pbd->reported_set.list. The way how it was corrupted in PSBM-65786
    suggests that ploop linked a preq into another list without unliniking it
    from reported_set.
    
    There are only 62 places where ploop manipulates preq->list. Let's
    debug them all: on list_add we save the new owner of the preq in
    preq->dbg_state, and on list_del we erase it. So, if ploop attempts to
    repeat the list corruption described above, the patch will detect it
    and print who the actual owner of the preq is.
    
    https://jira.sw.ru/browse/PSBM-65786
    
    Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
---
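For readers who want the accounting pattern in isolation, here is a
minimal user-space sketch. This is not the kernel code: C11 atomics stand
in for atomic_t, fprintf for printk()+dump_stack(), and every name except
the encoding macros (which mirror PREQ_DBG_STATE/OWNER/WHO below) is made
up for illustration.

#include <stdio.h>
#include <stdatomic.h>

/* Same packing as the patch: owner in the high 16 bits, call-site id
 * ("who") in the low 16 bits; 0 means "linked nowhere". */
#define DBG_STATE(owner, who)	(((owner) << 16) | (who))
#define DBG_OWNER(state)	((state) >> 16)
#define DBG_WHO(state)		((state) & 0xffff)

/* Owners start at 1 here so that state 0 is unambiguous (the patch
 * itself lets OWNER_FREE_LIST be 0). */
enum { OWNER_REPORTED_SET = 1, OWNER_READY_QUEUE };
enum { WHO_SITE_A = 1, WHO_SITE_B };

struct req {
	atomic_uint dbg_state;
};

/* Paired with list_add(): the request must not be owned yet. */
static void dbg_acquire(struct req *r, unsigned owner, unsigned who)
{
	unsigned old = atomic_exchange(&r->dbg_state, DBG_STATE(owner, who));

	if (old)
		fprintf(stderr, "double add: old_owner=%u old_who=%u\n",
			DBG_OWNER(old), DBG_WHO(old));
}

/* Paired with list_del(): the recorded owner must match. */
static void dbg_release(struct req *r, unsigned owner)
{
	unsigned old = atomic_exchange(&r->dbg_state, 0);

	if (DBG_OWNER(old) != owner)
		fprintf(stderr, "bad del: expected=%u got=%u\n",
			owner, DBG_OWNER(old));
}

int main(void)
{
	struct req r = { 0 };

	dbg_acquire(&r, OWNER_REPORTED_SET, WHO_SITE_A);
	/* The PSBM-65786 pattern: linking into a second list without
	 * releasing the first -- caught and attributed, not silent. */
	dbg_acquire(&r, OWNER_READY_QUEUE, WHO_SITE_B);
	dbg_release(&r, OWNER_READY_QUEUE);
	return 0;
}

Compiled with cc -std=c11, the second dbg_acquire() reports the current
owner and call site instead of silently corrupting a list -- the same
report the patch emits in preq_dbg_acquire()/preq_dbg_release() below.
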
 drivers/block/ploop/dev.c         |  96 ++++++++++++++++++++++++++-------
 drivers/block/ploop/discard.c     |   2 +
 drivers/block/ploop/freeblks.c    |  10 +++-
 drivers/block/ploop/io_direct.c   |   5 +-
 drivers/block/ploop/io_kaio.c     |  17 ++++--
 drivers/block/ploop/map.c         |  36 +++++++++----
 drivers/block/ploop/push_backup.c |  51 ++++++++++++------
 include/linux/ploop/ploop.h       | 110 +++++++++++++++++++++++++++++++++++++-
 8 files changed, 274 insertions(+), 53 deletions(-)

diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index e7aec54..553267f 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -190,6 +190,7 @@ ploop_alloc_request(struct ploop_device * plo)
 	}
 
 	preq = list_entry(plo->free_list.next, struct ploop_request, list);
+	preq_dbg_release(preq, OWNER_FREE_LIST);
 	list_del_init(&preq->list);
 	plo->free_qlen--;
 	ploop_congest(plo);
@@ -208,10 +209,12 @@ static void ploop_grab_iocontext(struct bio *bio)
 
 /* always called with plo->lock held */
 static inline void preq_unlink(struct ploop_request * preq,
-			       struct list_head *drop_list)
+			       struct list_head *drop_list, unsigned who)
 {
-	list_del(&preq->list);
+	preq_dbg_release(preq, OWNER_ENTRY_QUEUE);
+	list_del_init(&preq->list);
 	ploop_entry_qlen_dec(preq);
+	preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, who);
 	list_add(&preq->list, drop_list);
 }
 
@@ -246,6 +249,8 @@ void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list,
 		BUG_ON (test_bit(PLOOP_REQ_ZERO, &preq->state));
 		ploop_test_and_clear_blockable(plo, preq);
 		drop_qlen++;
+		preq_dbg_release(preq, OWNER_TEMP_DROP_LIST);
+		preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_PREQ_DROP);
 	}
 
 	spin_lock_irq(&plo->lock);
@@ -301,7 +306,7 @@ static void overlap_forward(struct ploop_device * plo,
 			preq_set_sync_bit(preq);
 		merge_rw_flags_to_req(preq1->req_rw, preq);
 		rb_erase(&preq1->lockout_link, &plo->entry_tree[preq1->req_rw & WRITE]);
-		preq_unlink(preq1, drop_list);
+		preq_unlink(preq1, drop_list, WHO_OVERLAP_FORWARD);
 		plo->st.coal_mforw++;
 	}
 
@@ -332,7 +337,7 @@ static void overlap_backward(struct ploop_device * plo,
 			preq_set_sync_bit(preq);
 		merge_rw_flags_to_req(preq1->req_rw, preq);
 		rb_erase(&preq1->lockout_link, &plo->entry_tree[preq->req_rw & WRITE]);
-		preq_unlink(preq1, drop_list);
+		preq_unlink(preq1, drop_list, WHO_OVERLAP_BACKWARD);
 		plo->st.coal_mback++;
 	}
 
@@ -446,7 +451,7 @@ insert_entry_tree(struct ploop_device * plo, struct ploop_request * preq0,
 		if (test_bit(PLOOP_REQ_SYNC, &preq0->state))
 			preq_set_sync_bit(clash);
 		merge_rw_flags_to_req(preq0->req_rw, clash);
-		preq_unlink(preq0, drop_list);
+		preq_unlink(preq0, drop_list, WHO_INSERT_ENTRY_TREE1);
 		plo->st.coal_forw2++;
 
 		n = rb_next(&clash->lockout_link);
@@ -471,7 +476,7 @@ insert_entry_tree(struct ploop_device * plo, struct ploop_request * preq0,
 		if (test_bit(PLOOP_REQ_SYNC, &preq0->state))
 			preq_set_sync_bit(clash);
 		merge_rw_flags_to_req(preq0->req_rw, clash);
-		preq_unlink(preq0, drop_list);
+		preq_unlink(preq0, drop_list, WHO_INSERT_ENTRY_TREE2);
 
 		n = rb_prev(&clash->lockout_link);
 		if (n) {
@@ -499,6 +504,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
 	BUG_ON(list_empty(&plo->free_list));
 	BUG_ON(plo->free_qlen <= 0);
 	preq = list_entry(plo->free_list.next, struct ploop_request, list);
+	preq_dbg_release(preq, OWNER_FREE_LIST);
 	list_del_init(&preq->list);
 	plo->free_qlen--;
 
@@ -542,6 +548,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
 				clear_bit(BIO_BDEV_REUSED, &bio->bi_flags);
 			}
 			BIO_ENDIO(plo->queue, bio, err);
+			preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_BIO_QUEUE);
 			list_add(&preq->list, &plo->free_list);
 			plo->free_qlen++;
 			plo->bio_discard_qlen--;
@@ -576,7 +583,7 @@ ploop_bio_queue(struct ploop_device * plo, struct bio * bio,
 		plo->bio_discard_qlen--;
 	else
 		plo->bio_qlen--;
-	ploop_entry_add(plo, preq);
+	ploop_entry_add(plo, preq, WHO_PLOOP_BIO_QUEUE);
 
 	if (bio->bi_size && !(bio->bi_rw & REQ_DISCARD))
 		insert_entry_tree(plo, preq, drop_list);
@@ -593,6 +600,8 @@ ploop_get_request(struct ploop_device * plo, struct list_head * list)
 		return NULL;
 
 	preq = list_first_entry(list, struct ploop_request, list);
+	preq_dbg_release(preq, (list == &plo->ready_queue) ?
+			 OWNER_READY_QUEUE : OWNER_ENTRY_QUEUE);
 	list_del_init(&preq->list);
 	return preq;
 }
@@ -1174,6 +1183,8 @@ static int __check_lockout(struct ploop_request *preq, bool pb)
 		else if (preq->req_cluster > p->req_cluster)
 			n = n->rb_right;
 		else {
+			preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST,
+					 pb ? WHO_CHECK_LOCKOUT_PB : WHO_CHECK_LOCKOUT);
 			list_add_tail(&preq->list, &p->delay_list);
 			plo->st.bio_lockouts++;
 			trace_preq_lockout(preq, p);
@@ -1357,12 +1368,18 @@ static void ploop_complete_request(struct ploop_request * preq)
 
 				preq->req_cluster = ~0U;
 
-				if (!list_empty(&preq->delay_list))
+				if (!list_empty(&preq->delay_list)) {
+					struct ploop_request *pr;
+					list_for_each_entry(pr, &preq->delay_list, list) {
+						preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
+						preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_REQ_MERGE);
+					}
 					list_splice_init(&preq->delay_list, plo->ready_queue.prev);
+				}
 				plo->active_reqs--;
 
 				preq->eng_state = PLOOP_E_ENTRY;
-				ploop_entry_add(plo, preq);
+				ploop_entry_add(plo, preq, WHO_PLOOP_COMPLETE_REQUEST1);
 				spin_unlock_irq(&plo->lock);
 				return;
 			}
@@ -1395,8 +1412,14 @@ static void ploop_complete_request(struct ploop_request * preq)
 	del_pb_lockout(preq); /* preq may die via ploop_fail_immediate() */
 	ploop_test_and_clear_blockable(plo, preq);
 
-	if (!list_empty(&preq->delay_list))
+	if (!list_empty(&preq->delay_list)) {
+		struct ploop_request *pr;
+		list_for_each_entry(pr, &preq->delay_list, list) {
+			preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
+			preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_REQUEST2);
+		}
 		list_splice_init(&preq->delay_list, plo->ready_queue.prev);
+	}
 
 	if (preq->map) {
 		map_release(preq->map);
@@ -1416,6 +1439,7 @@ static void ploop_complete_request(struct ploop_request * preq)
 		ploop_fb_put_zero_request(plo->fbd, preq);
 	} else {
 		ploop_uncongest(plo);
+		preq_dbg_acquire(preq, OWNER_FREE_LIST, WHO_PLOOP_COMPLETE_REQUEST2);
 		list_add(&preq->list, &plo->free_list);
 		plo->free_qlen++;
 		if (waitqueue_active(&plo->req_waitq))
@@ -1454,11 +1478,13 @@ void ploop_fail_request(struct ploop_request * preq, int err)
 	spin_lock_irq(&plo->lock);
 	if (err == -ENOSPC) {
 		set_bit(PLOOP_S_ENOSPC_EVENT, &plo->state);
+		preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_FAIL_REQUEST_ENOSPC);
 		list_add(&preq->list, &plo->ready_queue);
 		if (waitqueue_active(&plo->event_waitq))
 			wake_up_interruptible(&plo->event_waitq);
 	} else {
 		set_bit(PLOOP_S_ABORT, &plo->state);
+		preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_FAIL_REQUEST);
 		list_add_tail(&preq->list, &plo->ready_queue);
 	}
 	spin_unlock_irq(&plo->lock);
@@ -1492,6 +1518,7 @@ void ploop_complete_io_state(struct ploop_request * preq)
 	if (preq->error)
 		set_bit(PLOOP_S_ABORT, &plo->state);
 
+	preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_COMPLETE_IO_STATE);
 	list_add_tail(&preq->list, &plo->ready_queue);
 	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
 	    waitqueue_active(&plo->waitq))
@@ -1672,8 +1699,10 @@ void ploop_queue_zero_request(struct ploop_device *plo,
 	}
 	orig_preq->iblock = 0;
 	INIT_LIST_HEAD(&preq->delay_list);
+	preq_dbg_acquire(orig_preq, OWNER_PREQ_DELAY_LIST, WHO_PLOOP_QUEUE_ZERO_REQUEST1);
 	list_add_tail(&orig_preq->list, &preq->delay_list);
 
+	preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_PLOOP_QUEUE_ZERO_REQUEST2);
 	list_add(&preq->list, &plo->ready_queue);
 	plo->active_reqs++;
 
@@ -1998,6 +2027,15 @@ ploop_entry_nullify_req(struct ploop_request *preq)
 
 	sbl.head = sbl.tail = preq->aux_bio;
 	preq->eng_state = PLOOP_E_RELOC_NULLIFY;
+
+	/* We can replace if & list_del_init with BUG_ON:
+	   the caller always does list_del_init before calling us */
+	if (preq->list.next != &preq->list ||
+	    preq->list.prev != &preq->list) {
+		printk("ploop_entry_nullify_req(%p): unexpected preq->list: %p %p\n",
+		       preq, preq->list.next, preq->list.prev);
+		dump_stack();
+	}
 	list_del_init(&preq->list);
 
 	/*
@@ -2122,6 +2160,7 @@ void ploop_add_req_to_fsync_queue(struct ploop_request * preq)
 	struct ploop_io     * top_io    = &top_delta->io;
 
 	spin_lock_irq(&plo->lock);
+	preq_dbg_acquire(preq, OWNER_DIO_FSYNC_QUEUE, WHO_PLOOP_ADD_REQ_TO_FSYNC_QUEUE);
 	list_add_tail(&preq->list, &top_io->fsync_queue);
 	top_io->fsync_qlen++;
 	if (waitqueue_active(&top_io->fsync_waitq))
@@ -2250,8 +2289,14 @@ restart:
 
 		del_pb_lockout(preq);
 		spin_lock_irq(&plo->lock);
-		if (!list_empty(&preq->delay_list))
+		if (!list_empty(&preq->delay_list)) {
+			struct ploop_request *pr;
+			list_for_each_entry(pr, &preq->delay_list, list) {
+				preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
+				preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_ENTRY_REQUEST_PB_OUT);
+			}
 			list_splice_init(&preq->delay_list, plo->ready_queue.prev);
+		}
 		spin_unlock_irq(&plo->lock);
 	}
 
@@ -2566,8 +2611,10 @@ restart:
 			spin_lock_irq(&plo->lock);
 			if (!list_empty(&preq->delay_list)) {
 				struct ploop_request *pr;
-				pr = list_entry(preq->delay_list.next,
-						struct ploop_request, list);
+				list_for_each_entry(pr, &preq->delay_list, list) {
+					preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
+					preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_E_RELOC_COMPLETE);
+				}
 				list_splice_init(&preq->delay_list,
 						 plo->ready_queue.prev);
 			}
@@ -2919,8 +2966,14 @@ static void ploop_handle_enospc_req(struct ploop_request *preq)
 
 	del_lockout(preq);
 
-	if (!list_empty(&preq->delay_list))
+	if (!list_empty(&preq->delay_list)) {
+		struct ploop_request *pr;
+		list_for_each_entry(pr, &preq->delay_list, list) {
+			preq_dbg_release(pr, OWNER_PREQ_DELAY_LIST);
+			preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_HANDLE_ENOSPC_REQ);
+		}
 		list_splice_init(&preq->delay_list, plo->ready_queue.prev);
+	}
 
 	if (preq->map) {
 		map_release(preq->map);
@@ -3009,6 +3062,7 @@ static int ploop_thread(void * data)
 			if (test_bit(PLOOP_REQ_BARRIER, &preq->state)) {
 				set_bit(PLOOP_S_ATTENTION, &plo->state);
 				if (plo->active_reqs) {
+					preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, WHO_PLOOP_THREAD1);
 					list_add(&preq->list, &plo->entry_queue);
 					continue;
 				}
@@ -3019,6 +3073,7 @@ static int ploop_thread(void * data)
 				    plo->active_reqs > plo->entry_qlen &&
 				    time_before(jiffies, preq->tstamp + plo->tune.batch_entry_delay) &&
 				    !kthread_should_stop()) {
+					preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, WHO_PLOOP_THREAD2);
 					list_add(&preq->list, &plo->entry_queue);
 					once = 1;
 					mod_timer(&plo->mitigation_timer, preq->tstamp + plo->tune.batch_entry_delay);
@@ -3339,7 +3394,7 @@ void ploop_quiesce(struct ploop_device * plo)
 	init_completion(&plo->relaxed_comp);
 	plo->quiesce_comp = &qcomp;
 
-	ploop_entry_add(plo, preq);
+	ploop_entry_add(plo, preq, WHO_PLOOP_QUIESCE);
 	plo->barrier_reqs++;
 
 	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -3633,7 +3688,7 @@ static void ploop_merge_process(struct ploop_device * plo)
 
 		atomic_inc(&plo->maintenance_cnt);
 
-		ploop_entry_add(plo, preq);
+		ploop_entry_add(plo, preq, WHO_PLOOP_MERGE_PROCESS);
 
 		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
 			wake_up_interruptible(&plo->waitq);
@@ -3926,6 +3981,8 @@ static int ploop_start(struct ploop_device * plo, struct block_device *bdev)
 
 		preq->plo = plo;
 		INIT_LIST_HEAD(&preq->delay_list);
+		atomic_set(&preq->dbg_state,
+			   PREQ_DBG_STATE(OWNER_FREE_LIST, WHO_PLOOP_START));
 		list_add(&preq->list, &plo->free_list);
 		plo->free_qlen++;
 		plo->free_qmax++;
@@ -4084,7 +4141,8 @@ static int ploop_stop(struct ploop_device * plo, struct block_device *bdev)
 		struct ploop_request * preq;
 
 		preq = list_first_entry(&plo->free_list, struct ploop_request, list);
-		list_del(&preq->list);
+		preq_dbg_release(preq, OWNER_FREE_LIST);
+		list_del_init(&preq->list);
 		plo->free_qlen--;
 		plo->free_qmax--;
 		kfree(preq);
@@ -4260,7 +4318,7 @@ static void ploop_relocate(struct ploop_device * plo, int grow_stage)
 
 	atomic_inc(&plo->maintenance_cnt);
 
-	ploop_entry_add(plo, preq);
+	ploop_entry_add(plo, preq, WHO_PLOOP_RELOCATE);
 
 	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
 		wake_up_interruptible(&plo->waitq);
@@ -4572,7 +4630,7 @@ static void ploop_relocblks_process(struct ploop_device *plo)
 
 		atomic_inc(&plo->maintenance_cnt);
 
-		ploop_entry_add(plo, preq);
+		ploop_entry_add(plo, preq, WHO_PLOOP_RELOCBLKS_PROCESS);
 
 		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
 			wake_up_interruptible(&plo->waitq);
diff --git a/drivers/block/ploop/discard.c b/drivers/block/ploop/discard.c
index 828ab36..4fbf681 100644
--- a/drivers/block/ploop/discard.c
+++ b/drivers/block/ploop/discard.c
@@ -60,6 +60,8 @@ int ploop_discard_fini_ioc(struct ploop_device *plo)
 	spin_lock_irq(&plo->lock);
 	list_for_each_entry_safe(preq, tmp, &plo->entry_queue, list)
 		if (test_bit(PLOOP_REQ_DISCARD, &preq->state)) {
+			preq_dbg_release(preq, OWNER_ENTRY_QUEUE);
+			preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_DISCARD_FINI_IOC);
 			list_move(&preq->list, &drop_list);
 			ploop_entry_qlen_dec(preq);
 		}
diff --git a/drivers/block/ploop/freeblks.c b/drivers/block/ploop/freeblks.c
index a74a22d..bc045a4 100644
--- a/drivers/block/ploop/freeblks.c
+++ b/drivers/block/ploop/freeblks.c
@@ -200,6 +200,7 @@ int ploop_fb_check_reloc_req(struct ploop_freeblks_desc *fbd,
 		else {
 			spin_lock_irq(&fbd->plo->lock);
 			preq->eng_state = pin_state;
+			preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST, WHO_PLOOP_FB_CHECK_RELOC_REQ);
 			list_add_tail(&preq->list, &p->delay_list);
 			spin_unlock_irq(&fbd->plo->lock);
 			return 1;
@@ -277,13 +278,15 @@ ploop_fb_get_zero_request(struct ploop_freeblks_desc *fbd)
 
 	preq = list_entry(fbd->free_zero_list.next,
 			  struct ploop_request, list);
-	list_del(&preq->list);
+	preq_dbg_release(preq, OWNER_FBD_FREE_ZERO_LIST);
+	list_del_init(&preq->list);
 	return preq;
 }
 
 void ploop_fb_put_zero_request(struct ploop_freeblks_desc *fbd,
 			       struct ploop_request *preq)
 {
+	preq_dbg_acquire(preq, OWNER_FBD_FREE_ZERO_LIST, WHO_PLOOP_FB_PUT_ZERO_REQ);
 	list_add(&preq->list, &fbd->free_zero_list);
 }
 
@@ -790,6 +793,8 @@ struct ploop_freeblks_desc *ploop_fb_init(struct ploop_device *plo)
 
 		preq->plo = plo;
 		INIT_LIST_HEAD(&preq->delay_list);
+		atomic_set(&preq->dbg_state,
+			   PREQ_DBG_STATE(OWNER_FBD_FREE_ZERO_LIST, WHO_PLOOP_FB_INIT));
 		list_add(&preq->list, &fbd->free_zero_list);
 	}
 
@@ -838,7 +843,8 @@ void ploop_fb_fini(struct ploop_freeblks_desc *fbd, int err)
 		preq = list_first_entry(&fbd->free_zero_list,
 					struct ploop_request,
 					list);
-		list_del(&preq->list);
+		preq_dbg_release(preq, OWNER_FBD_FREE_ZERO_LIST);
+		list_del_init(&preq->list);
 		kfree(preq);
 	}
 
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index fb594c8..8077ec9 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -474,6 +474,7 @@ try_again:
 		spin_lock_irq(&plo->lock);
 		ploop_acc_flush_skip_locked(plo, preq->req_rw);
 		preq->iblock = iblk;
+		preq_dbg_acquire(preq, OWNER_DIO_FSYNC_QUEUE, WHO_CACHED_SUBMIT);
 		list_add_tail(&preq->list, &io->fsync_queue);
 		io->fsync_qlen++;
 		plo->st.bio_syncwait++;
@@ -833,11 +834,13 @@ static int dio_fsync_thread(void * data)
 		while (!list_empty(&list)) {
 			struct ploop_request * preq;
 			preq = list_entry(list.next, struct ploop_request, list);
-			list_del(&preq->list);
+			preq_dbg_release(preq, OWNER_DIO_FSYNC_QUEUE);
+			list_del_init(&preq->list);
 			if (err)
 				PLOOP_REQ_SET_ERROR(preq, err);
 
 			__set_bit(PLOOP_REQ_FSYNC_DONE, &preq->state);
+			preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_DIO_FSYNC_THREAD);
 			list_add_tail(&preq->list, &plo->ready_queue);
 			io->fsync_qlen--;
 		}
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index ee9ba26..f8ef504 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -43,10 +43,13 @@ static void __kaio_queue_fsync_req(struct ploop_request * preq, int prio)
 	struct ploop_delta  * delta = ploop_top_delta(plo);
 	struct ploop_io     * io    = &delta->io;
 
-	if (prio)
+	if (prio) {
+		preq_dbg_acquire(preq, OWNER_KAIO_FSYNC_QUEUE, WHO_KAIO_QUEUE_TRUNC_REQ);
 		list_add(&preq->list, &io->fsync_queue);
-	else
+	} else {
+		preq_dbg_acquire(preq, OWNER_KAIO_FSYNC_QUEUE, WHO_KAIO_QUEUE_FSYNC_REQ);
 		list_add_tail(&preq->list, &io->fsync_queue);
+	}
 
 	io->fsync_qlen++;
 	if (waitqueue_active(&io->fsync_waitq))
@@ -400,7 +403,8 @@ static int kaio_fsync_thread(void * data)
 			break;
 
 		preq = list_entry(io->fsync_queue.next, struct ploop_request, list);
-		list_del(&preq->list);
+		preq_dbg_release(preq, OWNER_KAIO_FSYNC_QUEUE);
+		list_del_init(&preq->list);
 		io->fsync_qlen--;
 		if (!preq->prealloc_size)
 			plo->st.bio_fsync++;
@@ -433,6 +437,7 @@ static int kaio_fsync_thread(void * data)
 		}
 
 		spin_lock_irq(&plo->lock);
+		preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_KAIO_FSYNC_THREAD);
 		list_add_tail(&preq->list, &plo->ready_queue);
 
 		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -483,6 +488,7 @@ kaio_submit_alloc(struct ploop_io *io, struct ploop_request * preq,
 			spin_unlock_irq(&io->plo->lock);
 			return;
 		} else { /* we're not first */
+			preq_dbg_acquire(preq, OWNER_PREQ_DELAY_LIST, WHO_KAIO_SUBMIT_ALLOC);
 			list_add_tail(&preq->list,
 				      &io->prealloc_preq->delay_list);
 			return;
@@ -965,9 +971,10 @@ static void kaio_issue_flush(struct ploop_io * io, struct ploop_request *preq)
 
 	spin_lock_irq(&io->plo->lock);
 
-	if (delta->flags & PLOOP_FMT_RDONLY)
+	if (delta->flags & PLOOP_FMT_RDONLY) {
+		preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_KAIO_ISSUE_FLUSH);
 		list_add_tail(&preq->list, &io->plo->ready_queue);
-	else
+	} else
 		kaio_queue_fsync_req(preq);
 
 	spin_unlock_irq(&io->plo->lock);
diff --git a/drivers/block/ploop/map.c b/drivers/block/ploop/map.c
index e579133..b6f2243 100644
--- a/drivers/block/ploop/map.c
+++ b/drivers/block/ploop/map.c
@@ -509,6 +509,7 @@ int map_index_fault(struct ploop_request * preq)
 
 	if (test_and_set_bit(PLOOP_MAP_READ, &m->state)) {
 		__TRACE("r %p %u %p\n", preq, preq->req_cluster, m);
+		preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_MAP_INDEX_FAULT);
 		list_add_tail(&preq->list, &m->io_queue);
 		plo->st.merge_lockouts++;
 		spin_unlock_irq(&plo->lock);
@@ -559,7 +560,9 @@ static void map_read_endio(struct ploop_request * preq, struct map_node * m)
 	list_for_each_safe(n, pn, &m->io_queue) {
 		preq = list_entry(n, struct ploop_request, list);
 		if (preq->eng_state == PLOOP_E_ENTRY) {
-			list_del(&preq->list);
+			preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
+			list_del_init(&preq->list);
+			preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_READ_ENDIO);
 			list_add_tail(&preq->list, &list);
 		}
 	}
@@ -618,7 +621,9 @@ flush_queue:
 	list_for_each_safe(n, pn, &m->io_queue) {
 		preq = list_entry(n, struct ploop_request, list);
 		if (preq->eng_state == PLOOP_E_ENTRY) {
-			list_del(&preq->list);
+			preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
+			list_del_init(&preq->list);
+			preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_MERGE_ENDIO);
 			list_add_tail(&preq->list, &list);
 		}
 	}
@@ -717,6 +722,7 @@ static int ploop_read_map(struct ploop_map * map, struct ploop_request * preq)
 		} else {
 			__TRACE("g %p %u %p\n", preq, preq->req_cluster, m);
 			plo->st.map_lockouts++;
+			preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_READ_MAP);
 			list_add_tail(&preq->list, &m->io_queue);
 			err = 1;
 		}
@@ -976,6 +982,7 @@ void ploop_index_update(struct ploop_request * preq)
 
 	if (test_and_set_bit(PLOOP_MAP_WRITEBACK, &m->state)) {
 		preq->eng_state = PLOOP_E_INDEX_DELAY;
+		preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_UPDATE1);
 		list_add_tail(&preq->list, &m->io_queue);
 		__TRACE("d %p %u %p\n", preq, preq->req_cluster, m);
 		return;
@@ -1019,6 +1026,7 @@ corrupted:
 out:
 	preq->eng_state = PLOOP_E_COMPLETE;
 	spin_lock_irq(&plo->lock);
+	preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_UPDATE2);
 	list_add_tail(&preq->list, &plo->ready_queue);
 	spin_unlock_irq(&plo->lock);
 	return;
@@ -1052,11 +1060,13 @@ static void map_idx_swap(struct map_node *m, unsigned int idx,
 }
 
 static inline void requeue_req(struct ploop_request *preq,
-			       unsigned long new_eng_state)
+			       unsigned long new_eng_state, unsigned who)
 {
 	preq->eng_state = new_eng_state;
 	spin_lock_irq(&preq->plo->lock);
-	list_del(&preq->list);
+	preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
+	list_del_init(&preq->list);
+	preq_dbg_acquire(preq, OWNER_READY_QUEUE, who);
 	list_add_tail(&preq->list, &preq->plo->ready_queue);
 	spin_unlock_irq(&preq->plo->lock);
 }
@@ -1077,7 +1087,7 @@ static void map_wb_complete_post_process(struct ploop_map *map,
 		   (!test_bit(PLOOP_REQ_RELOC_A, &preq->state) &&
 		    !test_bit(PLOOP_REQ_RELOC_S, &preq->state)))) {
 
-		requeue_req(preq, PLOOP_E_COMPLETE);
+		requeue_req(preq, PLOOP_E_COMPLETE, WHO_MAP_WB_COMPL_PP1);
 		return;
 	}
 
@@ -1088,7 +1098,7 @@ static void map_wb_complete_post_process(struct ploop_map *map,
 		preq->map = NULL;
 		spin_unlock_irq(&plo->lock);
 
-		requeue_req(preq, PLOOP_E_RELOC_COMPLETE);
+		requeue_req(preq, PLOOP_E_RELOC_COMPLETE, WHO_MAP_WB_COMPL_PP2);
 		return;
 	}
 
@@ -1096,13 +1106,13 @@ static void map_wb_complete_post_process(struct ploop_map *map,
 	BUG_ON (!preq->aux_bio);
 
 	if (++plo->grow_relocated > plo->grow_end - plo->grow_start) {
-		requeue_req(preq, PLOOP_E_COMPLETE);
+		requeue_req(preq, PLOOP_E_COMPLETE, WHO_MAP_WB_COMPL_PP3);
 		return;
 	}
 
 	del_lockout(preq);
 	preq->req_cluster++;
-	requeue_req(preq, PLOOP_E_ENTRY);
+	requeue_req(preq, PLOOP_E_ENTRY, WHO_MAP_WB_COMPL_PP4);
 }
 
 static void map_wb_complete(struct map_node * m, int err)
@@ -1165,7 +1175,9 @@ static void map_wb_complete(struct map_node * m, int err)
 				PLOOP_REQ_SET_ERROR(preq, err);
 				preq->eng_state = PLOOP_E_COMPLETE;
 				spin_lock_irq(&plo->lock);
-				list_del(cursor);
+				preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
+				list_del_init(cursor);
+				preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_WB_COMPLETE1);
 				list_add_tail(cursor, &preq->plo->ready_queue);
 				spin_unlock_irq(&plo->lock);
 			} else {
@@ -1199,7 +1211,9 @@ static void map_wb_complete(struct map_node * m, int err)
 				PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
 				preq->eng_state = PLOOP_E_COMPLETE;
 				spin_lock_irq(&plo->lock);
-				list_del(cursor);
+				preq_dbg_release(preq, OWNER_MAP_NODE_IO_QUEUE);
+				list_del_init(cursor);
+				preq_dbg_acquire(preq, OWNER_READY_QUEUE, WHO_MAP_WB_COMPLETE2);
 				list_add_tail(cursor, &plo->ready_queue);
 				spin_unlock_irq(&plo->lock);
 				break;
@@ -1227,6 +1241,7 @@ static void map_wb_complete(struct map_node * m, int err)
 
 			if (!main_preq) {
 				main_preq = preq;
+				preq_dbg_release(main_preq, OWNER_MAP_NODE_IO_QUEUE);
 				list_del_init(&main_preq->list);
 			}
 			plo->st.map_multi_updates++;
@@ -1253,6 +1268,7 @@ ploop_index_wb_complete(struct ploop_request * preq)
 	struct map_node * m = preq->map;
 
 	spin_lock_irq(&plo->lock);
+	preq_dbg_acquire(preq, OWNER_MAP_NODE_IO_QUEUE, WHO_PLOOP_INDEX_WB_COMPLETE);
 	list_add_tail(&preq->list, &m->io_queue);
 	spin_unlock_irq(&plo->lock);
 
diff --git a/drivers/block/ploop/push_backup.c b/drivers/block/ploop/push_backup.c
index 0a0a69c..ade5fa5 100644
--- a/drivers/block/ploop/push_backup.c
+++ b/drivers/block/ploop/push_backup.c
@@ -465,7 +465,7 @@ int ploop_pb_copy_cbt_to_user(struct ploop_pushbackup_desc *pbd, char *user_addr
 }
 
 static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
-				     struct pb_set *pbs)
+				     struct pb_set *pbs, unsigned new_owner)
 {
 	struct rb_root *tree = &pbs->tree;
 	struct rb_node ** p = &tree->rb_node;
@@ -496,6 +496,7 @@ static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
 		       &pbs->list, pbs->list.prev->next, pbs->list.prev, preq);
 		BUG();
 	}
+	preq_dbg_acquire(preq, new_owner, WHO_PLOOP_PB_ADD_REQ_TO_TREE);
 	list_add_tail(&preq->list, &pbs->list);
 
 	rb_link_node(&preq->reloc_link, parent, p);
@@ -505,17 +506,17 @@ static void ploop_pb_add_req_to_tree(struct ploop_request *preq,
 static void ploop_pb_add_req_to_pending(struct ploop_pushbackup_desc *pbd,
 					struct ploop_request *preq)
 {
-	ploop_pb_add_req_to_tree(preq, &pbd->pending_set);
+	ploop_pb_add_req_to_tree(preq, &pbd->pending_set, OWNER_PB_PENDING_SET);
 }
 
 static void ploop_pb_add_req_to_reported(struct ploop_pushbackup_desc *pbd,
 					 struct ploop_request *preq)
 {
-	ploop_pb_add_req_to_tree(preq, &pbd->reported_set);
+	ploop_pb_add_req_to_tree(preq, &pbd->reported_set, OWNER_PB_REPORTED_SET);
 }
 
 static void remove_req_from_pbs(struct pb_set *pbs,
-					 struct ploop_request *preq)
+				struct ploop_request *preq, unsigned old_owner)
 {
 	unsigned long timeout = preq->plo->tune.push_backup_timeout * HZ;
 	bool oldest_deleted = false;
@@ -524,6 +525,7 @@ static void remove_req_from_pbs(struct pb_set *pbs,
 		oldest_deleted = true;
 
 	rb_erase(&preq->reloc_link, &pbs->tree);
+	preq_dbg_release(preq, old_owner);
 	list_del_init(&preq->list);
 
 	if (timeout && oldest_deleted && !list_empty(&pbs->list) &&
@@ -546,7 +548,8 @@ static inline bool preq_match(struct ploop_request *preq, cluster_t clu,
 /* returns leftmost preq which req_cluster >= clu */
 static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
 						cluster_t clu, cluster_t len,
-						struct ploop_request **npreq)
+						struct ploop_request **npreq,
+						unsigned old_owner)
 {
 	struct rb_root *tree = &pbs->tree;
 	struct rb_node *n = tree->rb_node;
@@ -566,7 +569,7 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
 			if (n)
 				*npreq = rb_entry(n, struct ploop_request,
 						  reloc_link);
-			remove_req_from_pbs(pbs, p);
+			remove_req_from_pbs(pbs, p, old_owner);
 			return p;
 		}
 	}
@@ -582,7 +585,7 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
 		n = rb_next(&p->reloc_link);
 		if (n)
 			*npreq = rb_entry(n, struct ploop_request, reloc_link);
-		remove_req_from_pbs(pbs, p);
+		remove_req_from_pbs(pbs, p, old_owner);
 		return p;
 	}
 
@@ -591,7 +594,8 @@ static struct ploop_request *ploop_pb_get_req_from_tree(struct pb_set *pbs,
 
 static struct ploop_request *
 ploop_pb_get_first_req_from_tree(struct pb_set *pbs,
-				 struct ploop_request **npreq)
+				 struct ploop_request **npreq,
+				 unsigned old_owner)
 {
 	struct rb_root *tree = &pbs->tree;
 	static struct ploop_request *p;
@@ -610,27 +614,30 @@ ploop_pb_get_first_req_from_tree(struct pb_set *pbs,
 	}
 
 	p = rb_entry(n, struct ploop_request, reloc_link);
-	remove_req_from_pbs(pbs, p);
+	remove_req_from_pbs(pbs, p, old_owner);
 	return p;
 }
 
 static struct ploop_request *
 ploop_pb_get_first_req_from_pending(struct ploop_pushbackup_desc *pbd)
 {
-	return ploop_pb_get_first_req_from_tree(&pbd->pending_set, NULL);
+	return ploop_pb_get_first_req_from_tree(&pbd->pending_set, NULL,
+						OWNER_PB_PENDING_SET);
 }
 
 static struct ploop_request *
 ploop_pb_get_first_reqs_from_pending(struct ploop_pushbackup_desc *pbd,
 				     struct ploop_request **npreq)
 {
-	return ploop_pb_get_first_req_from_tree(&pbd->pending_set, npreq);
+	return ploop_pb_get_first_req_from_tree(&pbd->pending_set, npreq,
+						OWNER_PB_PENDING_SET);
 }
 
 static struct ploop_request *
 ploop_pb_get_first_req_from_reported(struct ploop_pushbackup_desc *pbd)
 {
-	return ploop_pb_get_first_req_from_tree(&pbd->reported_set, NULL);
+	return ploop_pb_get_first_req_from_tree(&pbd->reported_set, NULL,
+						OWNER_PB_REPORTED_SET);
 }
 
 int ploop_pb_preq_add_pending(struct ploop_pushbackup_desc *pbd,
@@ -731,6 +738,7 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
 	while (!RB_EMPTY_ROOT(&pbd->pending_set.tree)) {
 		struct ploop_request *preq =
 			ploop_pb_get_first_req_from_pending(pbd);
+		preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_PB_STOP1);
 		list_add(&preq->list, &drop_list);
 		ret++;
 	}
@@ -738,6 +746,7 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
 	while (!RB_EMPTY_ROOT(&pbd->reported_set.tree)) {
 		struct ploop_request *preq =
 			ploop_pb_get_first_req_from_reported(pbd);
+		preq_dbg_acquire(preq, OWNER_TEMP_DROP_LIST, WHO_PLOOP_PB_STOP2);
 		list_add(&preq->list, &drop_list);
 		ret++;
 	}
@@ -748,9 +757,14 @@ unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
 
 	if (!list_empty(&drop_list) || !ploop_pb_bio_list_empty(pbd)) {
 		struct ploop_device *plo = pbd->plo;
+		struct ploop_request *pr;
 
 		BUG_ON(!plo);
 		spin_lock_irq(&plo->lock);
+		list_for_each_entry(pr, &drop_list, list) {
+			preq_dbg_release(pr, OWNER_TEMP_DROP_LIST);
+			preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_PB_STOP3);
+		}
 		list_splice_init(&drop_list, plo->ready_queue.prev);
 		return_bios_back_to_plo(plo, &pbd->bio_pending_list);
 		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
@@ -832,7 +846,7 @@ int ploop_pb_get_pending(struct ploop_pushbackup_desc *pbd,
 		else
 			npreq = NULL;
 
-		remove_req_from_pbs(&pbd->pending_set, preq);
+		remove_req_from_pbs(&pbd->pending_set, preq, OWNER_PB_PENDING_SET);
 		ploop_pb_add_req_to_reported(pbd, preq);
 
 		(*len_p)++;
@@ -925,13 +939,15 @@ static void ploop_pb_process_extent(struct pb_set *pbs, cluster_t clu,
 				    int *n_found)
 {
 	struct ploop_request *preq, *npreq;
+	unsigned old_owner = n_found ? OWNER_PB_REPORTED_SET : OWNER_PB_PENDING_SET;
 
-	preq = ploop_pb_get_req_from_tree(pbs, clu, len, &npreq);
+	preq = ploop_pb_get_req_from_tree(pbs, clu, len, &npreq, old_owner);
 
 	while (preq) {
 		struct rb_node *n;
 
 		set_bit(PLOOP_REQ_PUSH_BACKUP, &preq->ppb_state);
+		preq_dbg_acquire(preq, OWNER_TEMP_READY_LIST, WHO_PLOOP_PB_PROCESS_EXTENT);
 		list_add(&preq->list, ready_list);
 
 		if (n_found)
@@ -946,7 +962,7 @@ static void ploop_pb_process_extent(struct pb_set *pbs, cluster_t clu,
 			npreq = rb_entry(n, struct ploop_request, reloc_link);
 		else
 			npreq = NULL;
-		remove_req_from_pbs(pbs, preq);
+		remove_req_from_pbs(pbs, preq, old_owner);
 	}
 }
 
@@ -975,8 +991,13 @@ void ploop_pb_put_reported(struct ploop_pushbackup_desc *pbd,
 
 	if (!list_empty(&ready_list)) {
 		struct ploop_device *plo = pbd->plo;
+		struct ploop_request *pr;
 
 		spin_lock_irq(&plo->lock);
+		list_for_each_entry(pr, &ready_list, list) {
+			preq_dbg_release(pr, OWNER_TEMP_READY_LIST);
+			preq_dbg_acquire(pr, OWNER_READY_QUEUE, WHO_PLOOP_PB_PUT_REPORTED);
+		}
 		list_splice(&ready_list, plo->ready_queue.prev);
 		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
 			wake_up_interruptible(&plo->waitq);
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index c7261c4..1f0e9fa 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -561,6 +561,7 @@ struct ploop_request
 
 	unsigned long		state;
 	unsigned long		eng_state;
+	atomic_t		dbg_state;
 	int			error;
 
 	struct map_node		*map;
@@ -619,6 +620,112 @@ struct ploop_request
 	struct ploop_io	       *eng_io;
 };
 
+/* ploop_request->dbg_state types & operations */
+enum {
+	WHO_PLOOP_BIO_QUEUE,
+	WHO_PLOOP_QUIESCE,
+	WHO_PLOOP_MERGE_PROCESS,
+	WHO_PLOOP_RELOCATE,
+	WHO_PLOOP_RELOCBLKS_PROCESS,
+	WHO_CACHED_SUBMIT,
+	WHO_DIO_FSYNC_THREAD,
+	WHO_KAIO_QUEUE_TRUNC_REQ,
+	WHO_KAIO_QUEUE_FSYNC_REQ,
+	WHO_KAIO_FSYNC_THREAD,
+	WHO_KAIO_SUBMIT_ALLOC,
+	WHO_KAIO_ISSUE_FLUSH,
+	WHO_OVERLAP_FORWARD,
+	WHO_OVERLAP_BACKWARD,
+	WHO_INSERT_ENTRY_TREE1,
+	WHO_INSERT_ENTRY_TREE2,
+	WHO_PLOOP_PREQ_DROP,
+	WHO_PLOOP_DISCARD_FINI_IOC,
+	WHO_CHECK_LOCKOUT_PB,
+	WHO_CHECK_LOCKOUT,
+	WHO_PLOOP_COMPLETE_REQUEST1,
+	WHO_PLOOP_COMPLETE_REQUEST2,
+	WHO_PLOOP_FAIL_REQUEST_ENOSPC,
+	WHO_PLOOP_FAIL_REQUEST,
+	WHO_PLOOP_COMPLETE_IO_STATE,
+	WHO_PLOOP_QUEUE_ZERO_REQUEST1,
+	WHO_PLOOP_QUEUE_ZERO_REQUEST2,
+	WHO_PLOOP_ADD_REQ_TO_FSYNC_QUEUE,
+	WHO_PLOOP_E_RELOC_COMPLETE,
+	WHO_PLOOP_COMPLETE_REQ_MERGE,
+	WHO_PLOOP_ENTRY_REQUEST_PB_OUT,
+	WHO_PLOOP_HANDLE_ENOSPC_REQ,
+	WHO_PLOOP_PB_STOP1,
+	WHO_PLOOP_PB_STOP2,
+	WHO_PLOOP_PB_STOP3,
+	WHO_PLOOP_THREAD1,
+	WHO_PLOOP_THREAD2,
+	WHO_PLOOP_START,
+	WHO_MAP_INDEX_FAULT,
+	WHO_MAP_READ_ENDIO,
+	WHO_PLOOP_PB_PUT_REPORTED,
+	WHO_PLOOP_PB_PROCESS_EXTENT,
+	WHO_MAP_MERGE_ENDIO,
+	WHO_PLOOP_READ_MAP,
+	WHO_PLOOP_INDEX_UPDATE1,
+	WHO_PLOOP_INDEX_UPDATE2,
+	WHO_MAP_WB_COMPL_PP1,
+	WHO_MAP_WB_COMPL_PP2,
+	WHO_MAP_WB_COMPL_PP3,
+	WHO_MAP_WB_COMPL_PP4,
+	WHO_MAP_WB_COMPLETE1,
+	WHO_MAP_WB_COMPLETE2,
+	WHO_PLOOP_INDEX_WB_COMPLETE,
+	WHO_PLOOP_PB_ADD_REQ_TO_TREE,
+	WHO_PLOOP_FB_CHECK_RELOC_REQ,
+	WHO_PLOOP_FB_PUT_ZERO_REQ,
+	WHO_PLOOP_FB_INIT,
+};
+
+enum { /* owner */
+	OWNER_FREE_LIST,
+	OWNER_ENTRY_QUEUE,
+	OWNER_READY_QUEUE,
+	OWNER_DIO_FSYNC_QUEUE,
+	OWNER_KAIO_FSYNC_QUEUE,
+	OWNER_PREQ_DELAY_LIST,
+	OWNER_TEMP_DROP_LIST,
+	OWNER_MAP_NODE_IO_QUEUE,
+	OWNER_TEMP_READY_LIST,
+	OWNER_PB_PENDING_SET,
+	OWNER_PB_REPORTED_SET,
+	OWNER_FBD_FREE_ZERO_LIST,
+};
+
+#define PREQ_DBG_STATE(owner, who) (((owner) << 16) | (who))
+#define PREQ_DBG_OWNER(state) ((state) >> 16)
+#define PREQ_DBG_WHO(state) ((state) & 0xffff)
+
+static inline void preq_dbg_acquire(struct ploop_request *preq,
+				    unsigned new_owner, unsigned new_who)
+{
+	unsigned int new_state = PREQ_DBG_STATE(new_owner, new_who);
+	unsigned int old_state = atomic_xchg(&preq->dbg_state, new_state);
+	if (old_state) {
+		printk("preq_dbg_acquire(%p): "
+			"new_owner=%d new_who=%d old_owner=%d "
+		       "old_who=%d\n", preq, new_owner, new_who,
+		       PREQ_DBG_OWNER(old_state), PREQ_DBG_WHO(old_state));
+		dump_stack();
+	}
+}
+
+static inline void preq_dbg_release(struct ploop_request *preq, unsigned owner)
+{
+	unsigned int old_state = atomic_xchg(&preq->dbg_state, 0);
+	if (owner != PREQ_DBG_OWNER(old_state)) {
+		printk("preq_dbg_release(%p): "
+		       "expected owner=%d, but old_owner=%d "
+		       "old_who=%d\n", preq, owner,
+		       PREQ_DBG_OWNER(old_state), PREQ_DBG_WHO(old_state));
+		dump_stack();
+	}
+}
+
 static inline struct ploop_delta * ploop_top_delta(struct ploop_device * plo)
 {
 	return list_empty(&plo->map.delta_list) ? NULL :
@@ -802,8 +909,9 @@ static inline void ploop_acc_flush_skip_locked(struct ploop_device *plo,
 		plo->st.bio_flush_skip++;
 }
 
-static inline void ploop_entry_add(struct ploop_device * plo, struct ploop_request * preq)
+static inline void ploop_entry_add(struct ploop_device * plo, struct ploop_request * preq, unsigned who)
 {
+	preq_dbg_acquire(preq, OWNER_ENTRY_QUEUE, who);
 	list_add_tail(&preq->list, &plo->entry_queue);
 	plo->entry_qlen++;
 	if (test_bit(PLOOP_REQ_SYNC, &preq->state) && (!(preq->req_rw & WRITE) || (preq->req_rw & (REQ_FLUSH|REQ_FUA)))) {

