[Devel] [PATCH vz7 13/46] fuse: rework abort

Maxim Patlasov <mpatlasov@virtuozzo.com>
Fri Mar 24 19:17:27 PDT 2017


Backport from mainline:

commit 41f982747e8175a4eb5e8d1939bdbb10f435b7f6
Author: Miklos Szeredi <mszeredi@suse.cz>
Date:   Wed Jul 1 16:25:59 2015 +0200

    fuse: rework abort

    Splice fc->pending and fc->processing lists into a common kill list while
    holding fc->lock.

    By the time we release fc->lock, pending and processing lists are empty and
    the io list contains only locked requests.

    Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
    Reviewed-by: Ashish Samant <ashish.samant@oracle.com>

Signed-off-by: Maxim Patlasov <mpatlasov@virtuozzo.com>
---
 fs/fuse/dev.c |   21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)
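
For readers who only see the deltas below, here is a condensed sketch of how
fuse_abort_conn() looks once this patch is applied. It is reconstructed from
the hunks plus the mainline commit: the walk over fc->io and the trailing
wake-ups are not visible in the diff context and may differ slightly in vz7.

void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);	/* unlocked requests taken off fc->io */
		LIST_HEAD(to_end2);	/* everything from pending/processing */

		fc->connected = 0;
		fc->blocked = 0;
		/* Mark every in-flight request aborted; only the ones that
		 * are not FR_LOCKED can be finished right away. */
		list_for_each_entry_safe(req, next, &fc->io, list) {
			spin_lock(&req->waitq.lock);
			set_bit(FR_ABORTED, &req->flags);
			if (!test_bit(FR_LOCKED, &req->flags))
				list_move(&req->list, &to_end1);
			spin_unlock(&req->waitq.lock);
		}
		/* Drain the background queue into fc->pending, then splice
		 * pending and processing into a private kill list while
		 * still holding fc->lock, so both lists are empty by the
		 * time the lock is dropped. */
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		list_splice_init(&fc->pending, &to_end2);
		list_splice_init(&fc->processing, &to_end2);
		/* Finish the unlocked io requests one by one; request_end()
		 * releases fc->lock, so re-take it on every iteration. */
		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			request_end(fc, req);
			spin_lock(&fc->lock);
		}
		/* Kill what used to sit on pending/processing. */
		end_requests(fc, &to_end2);
		while (forget_pending(fc))
			kfree(dequeue_forget(fc, 1, NULL));
		end_polls(fc);
		/* ... wake up waiters, etc. ... */
	}
	spin_unlock(&fc->lock);
}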

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a1e44fe..0091d12 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2108,9 +2108,6 @@ static void end_polls(struct fuse_conn *fc)
  * asynchronous request and the tricky deadlock (see
  * Documentation/filesystems/fuse.txt).
  *
- * Request progression from one list to the next is prevented by fc->connected
- * being false.
- *
  * Aborting requests under I/O goes as follows: 1: Separate out unlocked
  * requests, they should be finished off immediately.  Locked requests will be
  * finished after unlock; see unlock_request(). 2: Finish off the unlocked
@@ -2123,7 +2120,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
 	spin_lock(&fc->lock);
 	if (fc->connected) {
 		struct fuse_req *req, *next;
-		LIST_HEAD(to_end);
+		LIST_HEAD(to_end1);
+		LIST_HEAD(to_end2);
 
 		fc->connected = 0;
 		fc->blocked = 0;
@@ -2133,19 +2131,20 @@ void fuse_abort_conn(struct fuse_conn *fc)
 			spin_lock(&req->waitq.lock);
 			set_bit(FR_ABORTED, &req->flags);
 			if (!test_bit(FR_LOCKED, &req->flags))
-				list_move(&req->list, &to_end);
+				list_move(&req->list, &to_end1);
 			spin_unlock(&req->waitq.lock);
 		}
-		while (!list_empty(&to_end)) {
-			req = list_first_entry(&to_end, struct fuse_req, list);
+		fc->max_background = UINT_MAX;
+		flush_bg_queue(fc);
+		list_splice_init(&fc->pending, &to_end2);
+		list_splice_init(&fc->processing, &to_end2);
+		while (!list_empty(&to_end1)) {
+			req = list_first_entry(&to_end1, struct fuse_req, list);
 			__fuse_get_request(req);
 			request_end(fc, req);
 			spin_lock(&fc->lock);
 		}
-		fc->max_background = UINT_MAX;
-		flush_bg_queue(fc);
-		end_requests(fc, &fc->pending);
-		end_requests(fc, &fc->processing);
+		end_requests(fc, &to_end2);
 		while (forget_pending(fc))
 			kfree(dequeue_forget(fc, 1, NULL));
 		end_polls(fc);


