[Devel] [PATCH 3/3] fuse: implement exclusive wakeup for blocked_waitq

Maxim Patlasov mpatlasov at parallels.com
Wed Dec 26 04:45:09 PST 2012


The patch solves the thundering herd problem. Since the previous patches ensured
that only allocations for background requests may block, it is safe to wake up a
single waiter. Whichever waiter it is, it will wake up another one in
request_end() afterwards.

Signed-off-by: Maxim Patlasov <mpatlasov at parallels.com>
---
 fs/fuse/dev.c |   21 ++++++++++++++++-----
 1 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c7bef93..7ed2096 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -112,8 +112,8 @@ struct fuse_req *fuse_get_req_internal(struct fuse_conn *fc,
 		int intr;
 
 		block_sigs(&oldset);
-		intr = wait_event_interruptible(fc->blocked_waitq,
-						!fc->blocked);
+		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
+							  !fc->blocked);
 		restore_sigs(&oldset);
 		err = -EINTR;
 		if (intr)
@@ -224,6 +224,13 @@ struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 {
 	if (atomic_dec_and_test(&req->count)) {
+		if (unlikely(req->background)) {
+			spin_lock(&fc->lock);
+			if (!fc->blocked)
+				wake_up(&fc->blocked_waitq);
+			spin_unlock(&fc->lock);
+		}
+
 		if (req->waiting)
 			atomic_dec(&fc->num_waiting);
 
@@ -321,10 +328,14 @@ __releases(fc->lock)
 	list_del(&req->intr_entry);
 	req->state = FUSE_REQ_FINISHED;
 	if (req->background) {
-		if (fc->num_background == fc->max_background) {
+		req->background = 0;
+
+		if (fc->num_background == fc->max_background)
 			fc->blocked = 0;
-			wake_up_all(&fc->blocked_waitq);
-		}
+
+		if (!fc->blocked)
+			wake_up(&fc->blocked_waitq);
+
 		if (fc->num_background == fc->congestion_threshold &&
 		    fc->connected && fc->bdi_initialized) {
 			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);




More information about the Devel mailing list