[Devel] [PATCH vz7 25/46] fuse: no fc->lock for iqueue parts

Maxim Patlasov mpatlasov at virtuozzo.com
Fri Mar 24 19:22:46 PDT 2017


Backport from mainline:

commit fd22d62ed0c36e260ac3e13167bc073f28407c70
Author: Miklos Szeredi <mszeredi at suse.cz>
Date:   Wed Jul 1 16:26:03 2015 +0200

    fuse: no fc->lock for iqueue parts

    Remove fc->lock protection from input queue members, now protected by
    fiq->waitq.lock.

    Signed-off-by: Miklos Szeredi <mszeredi at suse.cz>
    Reviewed-by: Ashish Samant <ashish.samant at oracle.com>

Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
---
 fs/fuse/dev.c |   72 ++++++++++++++++-----------------------------------------
 1 file changed, 20 insertions(+), 52 deletions(-)
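
A note on the resulting locking (a minimal sketch of the pattern, not
the literal kernel code; struct fields and error handling abbreviated):
input-queue state (fiq->pending, fiq->interrupts, the forget list and
fiq->connected) is now serialized by fiq->waitq.lock alone, so queueing
a request reduces to:

	/* fc->lock is no longer taken on this path */
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		list_add_tail(&req->list, &fiq->pending);
		/* the waitqueue lock is already held, hence the _locked
		 * variant to wake readers sleeping in request_wait() */
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);

(The real queue_request() additionally fills in req->in.h.len and
signals fasync readers.) On the read side, fuse_dev_do_read() dequeues
under fiq->waitq.lock, drops it, and only then takes fc->lock to move
the request onto fc->io (see the hunk around list_add(&req->list,
&fc->io) below), so the read path never holds both locks at once.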

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5a65afa..de6731b 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -323,7 +323,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;
 
-	spin_lock(&fc->lock);
 	spin_lock(&fiq->waitq.lock);
 	if (fiq->connected) {
 		fiq->forget_list_tail->next = forget;
@@ -334,7 +333,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 		kfree(forget);
 	}
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 }
 
 static void flush_bg_queue(struct fuse_conn *fc)
@@ -426,13 +424,11 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		if (!err)
 			return;
 
-		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
 		/* matches barrier in fuse_dev_do_read() */
 		smp_mb__after_atomic();
 		if (test_bit(FR_SENT, &req->flags))
 			queue_interrupt(fiq, req);
-		spin_unlock(&fc->lock);
 	}
 
 	if (!test_bit(FR_FORCE, &req->flags)) {
@@ -443,19 +439,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		if (!err)
 			return;
 
-		spin_lock(&fc->lock);
 		spin_lock(&fiq->waitq.lock);
 		/* Request is not yet in userspace, bail out */
 		if (test_bit(FR_PENDING, &req->flags)) {
 			list_del(&req->list);
 			spin_unlock(&fiq->waitq.lock);
-			spin_unlock(&fc->lock);
 			__fuse_put_request(req);
 			req->out.h.error = -EINTR;
 			return;
 		}
 		spin_unlock(&fiq->waitq.lock);
-		spin_unlock(&fc->lock);
 	}
 
 	/*
@@ -471,14 +464,11 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req,
 	struct fuse_iqueue *fiq = &fc->iq;
 
 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
-	spin_lock(&fc->lock);
 	spin_lock(&fiq->waitq.lock);
 	if (!fiq->connected) {
-		spin_unlock(&fc->lock);
 		spin_unlock(&fiq->waitq.lock);
 		req->out.h.error = -ENOTCONN;
 	} else if (ff && test_bit(FUSE_S_FAIL_IMMEDIATELY, &ff->ff_state)) {
-		spin_unlock(&fc->lock);
 		spin_unlock(&fiq->waitq.lock);
 		req->out.h.error = -EIO;
 	} else {
@@ -488,7 +478,6 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req,
 		   after request_end() */
 		__fuse_get_request(req);
 		spin_unlock(&fiq->waitq.lock);
-		spin_unlock(&fc->lock);
 
 		request_wait_answer(fc, req);
 		/* Pairs with smp_wmb() in request_end() */
@@ -569,14 +558,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 
 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
-	spin_lock(&fc->lock);
 	spin_lock(&fiq->waitq.lock);
 	if (fiq->connected) {
 		queue_request(fiq, req);
 		err = 0;
 	}
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 
 	return err;
 }
@@ -1050,13 +1037,10 @@ static int request_pending(struct fuse_iqueue *fiq)
 }
 
 /* Wait until a request is available on the pending list */
-static void request_wait(struct fuse_conn *fc)
-__releases(fc->iq.waitq.lock)
-__releases(fc->lock)
-__acquires(fc->lock)
-__acquires(fc->iq.waitq.lock)
+static void request_wait(struct fuse_iqueue *fiq)
+__releases(fiq->waitq.lock)
+__acquires(fiq->waitq.lock)
 {
-	struct fuse_iqueue *fiq = &fc->iq;
 	DECLARE_WAITQUEUE(wait, current);
 
 	add_wait_queue_exclusive(&fiq->waitq, &wait);
@@ -1066,9 +1050,7 @@ __acquires(fc->iq.waitq.lock)
 			break;
 
 		spin_unlock(&fiq->waitq.lock);
-		spin_unlock(&fc->lock);
 		schedule();
-		spin_lock(&fc->lock);
 		spin_lock(&fiq->waitq.lock);
 	}
 	set_current_state(TASK_RUNNING);
@@ -1081,14 +1063,13 @@ __acquires(fc->iq.waitq.lock)
  * Unlike other requests this is assembled on demand, without a need
  * to allocate a separate fuse_req structure.
  *
- * Called with fc->lock held, releases it
+ * Called with fiq->waitq.lock held, releases it
  */
-static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_interrupt(struct fuse_iqueue *fiq,
+			       struct fuse_copy_state *cs,
 			       size_t nbytes, struct fuse_req *req)
-__releases(fc->iq.waitq.lock)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
-	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_in_header ih;
 	struct fuse_interrupt_in arg;
 	unsigned reqsize = sizeof(ih) + sizeof(arg);
@@ -1104,7 +1085,6 @@ __releases(fc->lock)
 	arg.unique = req->in.h.unique;
 
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 	if (nbytes < reqsize)
 		return -EINVAL;
 
@@ -1138,14 +1118,12 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 	return head;
 }
 
-static int fuse_read_single_forget(struct fuse_conn *fc,
+static int fuse_read_single_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs,
 				   size_t nbytes)
-__releases(fc->iq.waitq.lock)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
 	int err;
-	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
 	struct fuse_forget_in arg = {
 		.nlookup = forget->forget_one.nlookup,
@@ -1158,7 +1136,6 @@ __releases(fc->lock)
 	};
 
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 	kfree(forget);
 	if (nbytes < ih.len)
 		return -EINVAL;
@@ -1174,16 +1151,14 @@ __releases(fc->lock)
 	return ih.len;
 }
 
-static int fuse_read_batch_forget(struct fuse_conn *fc,
+static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
 				   struct fuse_copy_state *cs, size_t nbytes)
-__releases(fc->iq.waitq.lock)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
 	int err;
 	unsigned max_forgets;
 	unsigned count;
 	struct fuse_forget_link *head;
-	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_batch_forget_in arg = { .count = 0 };
 	struct fuse_in_header ih = {
 		.opcode = FUSE_BATCH_FORGET,
@@ -1193,14 +1168,12 @@ __releases(fc->lock)
 
 	if (nbytes < ih.len) {
 		spin_unlock(&fiq->waitq.lock);
-		spin_unlock(&fc->lock);
 		return -EINVAL;
 	}
 
 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
 	head = dequeue_forget(fiq, max_forgets, &count);
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 
 	arg.count = count;
 	ih.len += count * sizeof(struct fuse_forget_one);
@@ -1227,17 +1200,15 @@ __releases(fc->lock)
 	return ih.len;
 }
 
-static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
+static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
+			    struct fuse_copy_state *cs,
 			    size_t nbytes)
-__releases(fc->iq.waitq.lock)
-__releases(fc->lock)
+__releases(fiq->waitq.lock)
 {
-	struct fuse_iqueue *fiq = &fc->iq;
-
 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
-		return fuse_read_single_forget(fc, cs, nbytes);
+		return fuse_read_single_forget(fiq, cs, nbytes);
 	else
-		return fuse_read_batch_forget(fc, cs, nbytes);
+		return fuse_read_batch_forget(fiq, cs, nbytes);
 }
 
 /*
@@ -1259,14 +1230,13 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	unsigned reqsize;
 
  restart:
-	spin_lock(&fc->lock);
 	spin_lock(&fiq->waitq.lock);
 	err = -EAGAIN;
 	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
 	    !request_pending(fiq))
 		goto err_unlock;
 
-	request_wait(fc);
+	request_wait(fiq);
 	err = -ENODEV;
 	if (!fiq->connected)
 		goto err_unlock;
@@ -1277,12 +1247,12 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	if (!list_empty(&fiq->interrupts)) {
 		req = list_entry(fiq->interrupts.next, struct fuse_req,
 				 intr_entry);
-		return fuse_read_interrupt(fc, cs, nbytes, req);
+		return fuse_read_interrupt(fiq, cs, nbytes, req);
 	}
 
 	if (forget_pending(fiq)) {
 		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
-			return fuse_read_forget(fc, cs, nbytes);
+			return fuse_read_forget(fc, fiq, cs, nbytes);
 
 		if (fiq->forget_batch <= -8)
 			fiq->forget_batch = 16;
@@ -1293,6 +1263,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	list_del_init(&req->list);
 	spin_unlock(&fiq->waitq.lock);
 
+	spin_lock(&fc->lock);
 	list_add(&req->list, &fc->io);
 
 	in = &req->in;
@@ -1339,7 +1310,6 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 
  err_unlock:
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 	return err;
 }
 
@@ -2091,14 +2061,12 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 	fiq = &fc->iq;
 	poll_wait(file, &fiq->waitq, wait);
 
-	spin_lock(&fc->lock);
 	spin_lock(&fiq->waitq.lock);
 	if (!fiq->connected)
 		mask = POLLERR;
 	else if (request_pending(fiq))
 		mask |= POLLIN | POLLRDNORM;
 	spin_unlock(&fiq->waitq.lock);
-	spin_unlock(&fc->lock);
 
 	return mask;
 }
