[Devel] [PATCH VZ9 01/20] fs/fuse: fix broken 'fuse_invalidate_files()'

Alexey Kuznetsov kuznet at virtuozzo.com
Fri Oct 6 13:42:13 MSK 2023


Use the correct lock to protect the kill_requests operation on all request
queues. The FUSE_S_FAIL_IMMEDIATELY bit must be tested under the
corresponding queue lock to prevent new requests from being added to a
queue after it has been killed.
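
To illustrate the idea (not kernel code): a minimal userspace sketch of the
check-under-lock pattern, with a pthread mutex standing in for the queue
spinlock. For simplicity the fail-fast flag lives on the toy queue rather
than on the fuse_file (in the patch it is ff->ff_state, set under fi->lock),
and all toy_* names are made up:

/* toy_failfast.c: userspace sketch, build with: cc -pthread toy_failfast.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_req {
	struct toy_req *next;
	int id;
};

struct toy_queue {
	pthread_mutex_t lock;		/* stands in for the queue spinlock */
	struct toy_req *head;
	bool fail_immediately;		/* simplified FUSE_S_FAIL_IMMEDIATELY */
};

/* Enqueue tests the fail-fast flag under the same lock that kill takes. */
static int toy_enqueue(struct toy_queue *q, struct toy_req *req)
{
	int err = 0;

	pthread_mutex_lock(&q->lock);
	if (q->fail_immediately) {
		err = -1;		/* the kernel would complete it with -EIO */
	} else {
		req->next = q->head;
		q->head = req;
	}
	pthread_mutex_unlock(&q->lock);
	return err;
}

/* Drain the queue and refuse further requests, under the same lock. */
static void toy_kill_requests(struct toy_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->fail_immediately = true;
	while (q->head) {
		struct toy_req *req = q->head;

		q->head = req->next;
		printf("killed request %d\n", req->id);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct toy_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct toy_req r1 = { .id = 1 }, r2 = { .id = 2 };

	toy_enqueue(&q, &r1);
	toy_kill_requests(&q);
	if (toy_enqueue(&q, &r2))	/* rejected: queue already killed */
		printf("request 2 rejected\n");
	return 0;
}

Because the flag test and the list insertion happen under the same lock as
the kill pass, a request can never land on a queue after that queue has
been drained.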

Adjust the order in which the kill_requests operation is performed on the
queues as follows:

bg_queue -> [ kio queues ] -> input queue -> processing queue.

This follows the order in which a fuse request can move between queues.
The most important part is that the bg_queue must be killed first;
otherwise requests from it could be flushed to the input queue after the
input queue has already been killed.
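
A trivial sequential model of that interleaving (illustration only; in the
kernel the flush runs concurrently from flush_bg_queue(), and all toy_*
names are made up):

/* toy_order.c: why bg_queue is drained before the input queue */
#include <stdio.h>

enum { BG, INPUT, NQ };

static int queued[NQ];			/* requests sitting on each queue */

/* models flushing backgrounded requests to the input queue */
static void toy_flush_bg(void)
{
	queued[INPUT] += queued[BG];
	queued[BG] = 0;
}

static void toy_kill(int q)
{
	queued[q] = 0;			/* fail everything on this queue */
}

int main(void)
{
	/* Old order: input queue killed before bg_queue. */
	queued[BG] = 1;
	toy_kill(INPUT);
	toy_flush_bg();			/* the race window in the old code */
	toy_kill(BG);
	printf("old order: %d request(s) stranded on the input queue\n",
	       queued[INPUT]);

	/* New order: bg_queue killed first, then the input queue. */
	queued[BG] = 1;
	toy_kill(BG);
	toy_flush_bg();			/* nothing left to flush */
	toy_kill(INPUT);
	printf("new order: %d request(s) stranded on the input queue\n",
	       queued[INPUT]);
	return 0;
}

With the old order the flushed request survives on the already-killed input
queue; with the new order there is nothing left on bg_queue to flush.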

Signed-off-by: Liu Kui <Kui.Liu at acronis.com>
Acked-by: Alexey Kuznetsov <kuznet at acronis.com>
---
 fs/fuse/inode.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 91b5591..c377700 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -512,15 +512,14 @@ int fuse_invalidate_files(struct fuse_conn *fc, u64 nodeid)
 
 	fi = get_fuse_inode(inode);
 
+	/* Mark that invalidate files is in progress */
 	spin_lock(&fi->lock);
+	set_bit(FUSE_I_INVAL_FILES, &fi->state);
 	list_for_each_entry(ff, &fi->rw_files, rw_entry) {
 		set_bit(FUSE_S_FAIL_IMMEDIATELY, &ff->ff_state);
 	}
 	spin_unlock(&fi->lock);
 
-	/* Mark that invalidate files is in progress */
-	set_bit(FUSE_I_INVAL_FILES, &fi->state);
-
 	/* let them see FUSE_S_FAIL_IMMEDIATELY */
 	wake_up_all(&fc->blocked_waitq);
 
@@ -528,24 +527,38 @@ int fuse_invalidate_files(struct fuse_conn *fc, u64 nodeid)
 	if (!err || err == -EIO) { /* AS_EIO might trigger -EIO */
 		struct fuse_dev *fud;
 		spin_lock(&fc->lock);
+
+		/*
+		 * Clean bg_queue first to prevent requests from being flushed
+		 * to an input queue after it has been cleaned.
+		 */
+		spin_lock(&fc->bg_lock);
+		fuse_kill_requests(fc, inode, &fc->bg_queue);
+		spin_unlock(&fc->bg_lock);
+
+		if (fc->kio.op && fc->kio.op->kill_requests)
+			fc->kio.op->kill_requests(fc, inode);
+
+		spin_lock(&fc->main_iq.lock);
+		fuse_kill_requests(fc, inode, &fc->main_iq.pending);
+		spin_unlock(&fc->main_iq.lock);
+
 		list_for_each_entry(fud, &fc->devices, entry) {
 			struct fuse_pqueue *fpq = &fud->pq;
 			struct fuse_iqueue *fiq = fud->fiq;
 			int i;
+
+			spin_lock(&fiq->lock);
+			fuse_kill_requests(fc, inode, &fiq->pending);
+			spin_unlock(&fiq->lock);
+
 			spin_lock(&fpq->lock);
 			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
 				fuse_kill_requests(fc, inode, &fpq->processing[i]);
 			fuse_kill_requests(fc, inode, &fpq->io);
 			spin_unlock(&fpq->lock);
 
-			spin_lock(&fiq->waitq.lock);
-			fuse_kill_requests(fc, inode, &fiq->pending);
-			spin_unlock(&fiq->waitq.lock);
 		}
-		fuse_kill_requests(fc, inode, &fc->main_iq.pending);
-		fuse_kill_requests(fc, inode, &fc->bg_queue);
-		if (fc->kio.op && fc->kio.op->kill_requests)
-			fc->kio.op->kill_requests(fc, inode);
 
 		wake_up(&fi->page_waitq); /* readpage[s] can wait on fuse wb */
 		spin_unlock(&fc->lock);
-- 
1.8.3.1


