[Devel] [PATCH 1/8] fuse: introduce fc->bg_lock

Kirill Tkhai ktkhai at virtuozzo.com
Wed Apr 3 18:37:06 MSK 2019


ms commit ae2dffa39485

To reduce contention on fc->lock, this patch introduces bg_lock to
protect the fields related to the background queue:
max_background, congestion_threshold, num_background, active_background,
bg_queue and blocked.

This allows the next patch to make async reads stop taking fc->lock, so
async reads and writes executed in parallel will perform better.

Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi at redhat.com>
---
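
For readers skimming the diff below, here is a minimal, self-contained
userspace sketch of the resulting locking split. The names mirror the
kernel fields, but the types and function bodies are simplified
stand-ins (pthread spinlocks instead of the kernel's spinlock_t;
wakeups, congestion accounting and the bg_queue list itself are
omitted), not the actual implementation:

	#include <pthread.h>
	#include <stdio.h>

	/* Simplified stand-in for struct fuse_conn: only the fields that
	 * move under the new bg_lock are shown. */
	struct conn {
		pthread_spinlock_t lock;	/* general connection state */
		pthread_spinlock_t bg_lock;	/* background fields below */
		unsigned int max_background;
		unsigned int num_background;
		unsigned int active_background;
		int blocked;
	};

	/* Queueing side: mirrors the accounting done in
	 * fuse_request_send_background_nocheck(), now under bg_lock. */
	static void bg_queue_request(struct conn *fc)
	{
		pthread_spin_lock(&fc->bg_lock);
		fc->num_background++;
		if (fc->num_background == fc->max_background)
			fc->blocked = 1;	/* throttle new bg requests */
		fc->active_background++;
		pthread_spin_unlock(&fc->bg_lock);
	}

	/* Completion side: mirrors the bg accounting in request_end().
	 * Note that fc->lock is never taken on this path any more. */
	static void bg_request_end(struct conn *fc)
	{
		pthread_spin_lock(&fc->bg_lock);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;	/* room again; kernel wakes waiters */
		fc->num_background--;
		fc->active_background--;
		pthread_spin_unlock(&fc->bg_lock);
	}

	int main(void)
	{
		struct conn fc = { .max_background = 12 };

		pthread_spin_init(&fc.lock, PTHREAD_PROCESS_PRIVATE);
		pthread_spin_init(&fc.bg_lock, PTHREAD_PROCESS_PRIVATE);
		bg_queue_request(&fc);
		bg_request_end(&fc);
		printf("num_background=%u blocked=%d\n",
		       fc.num_background, fc.blocked);
		return 0;
	}

The point of the split is visible here: completion and queueing of
background requests only ever touch bg_lock, so they no longer contend
with paths that hold fc->lock for unrelated connection state.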
 fs/fuse/control.c                  |    8 ++++----
 fs/fuse/dev.c                      |   24 ++++++++++++++----------
 fs/fuse/file.c                     |    2 +-
 fs/fuse/fuse_i.h                   |    8 ++++++--
 fs/fuse/inode.c                    |    5 +++++
 fs/fuse/kio/pcs/pcs_fuse_kdirect.c |   18 +++++++++---------
 6 files changed, 39 insertions(+), 26 deletions(-)

diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index db733f4318d4..254f048359a8 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -126,12 +126,12 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
 	if (ret > 0) {
 		struct fuse_conn *fc = fuse_ctl_file_conn_get(file);
 		if (fc) {
-			spin_lock(&fc->lock);
+			spin_lock(&fc->bg_lock);
 			fc->max_background = val;
 			fc->blocked = fc->num_background >= fc->max_background;
 			if (!fc->blocked)
 				wake_up(&fc->blocked_waitq);
-			spin_unlock(&fc->lock);
+			spin_unlock(&fc->bg_lock);
 			fuse_conn_put(fc);
 		}
 	}
@@ -172,7 +172,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
 	if (!fc)
 		goto out;
 
-	spin_lock(&fc->lock);
+	spin_lock(&fc->bg_lock);
 	fc->congestion_threshold = val;
 	if (fc->sb) {
 		if (fc->num_background < fc->congestion_threshold) {
@@ -183,7 +183,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
 			set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
 		}
 	}
-	spin_unlock(&fc->lock);
+	spin_unlock(&fc->bg_lock);
 	fuse_conn_put(fc);
 out:
 	return ret;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 269753235c60..1ee112b1cd7e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -310,10 +310,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
 			 * We get here in the unlikely case that a background
 			 * request was allocated but not sent
 			 */
-			spin_lock(&fc->lock);
+			spin_lock(&fc->bg_lock);
 			if (!fc->blocked)
 				wake_up(&fc->blocked_waitq);
-			spin_unlock(&fc->lock);
+			spin_unlock(&fc->bg_lock);
 		}
 
 		if (test_bit(FR_WAITING, &req->flags)) {
@@ -441,7 +441,7 @@ void request_end(struct fuse_conn *fc, struct fuse_req *req)
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (bg) {
-		spin_lock(&fc->lock);
+		spin_lock(&fc->bg_lock);
 		clear_bit(FR_BACKGROUND, &req->flags);
 		if (fc->num_background == fc->max_background) {
 			fc->blocked = 0;
@@ -465,7 +465,7 @@ void request_end(struct fuse_conn *fc, struct fuse_req *req)
 		fc->num_background--;
 		fc->active_background--;
 		flush_bg_queue(fc, fiq);
-		spin_unlock(&fc->lock);
+		spin_unlock(&fc->bg_lock);
 	}
 	if (req->end) {
 		req->end(fc, req);
@@ -603,8 +603,8 @@ EXPORT_SYMBOL_GPL(fuse_request_send);
  *
  * fc->connected must have been checked previously
  */
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-					 struct fuse_req *req)
+void fuse_request_send_background_nocheck(struct fuse_conn *fc,
+					  struct fuse_req *req)
 {
 	struct fuse_iqueue *fiq = req->fiq;
 
@@ -615,6 +615,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
 		atomic_inc(&fc->num_waiting);
 	}
 	__set_bit(FR_ISREPLY, &req->flags);
+	spin_lock(&fc->bg_lock);
 	fc->num_background++;
 	if (fc->num_background == fc->max_background)
 		fc->blocked = 1;
@@ -630,12 +631,13 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
 		req->in.h.unique = fuse_get_unique(fiq);
 		queue_request(fiq, req);
 		spin_unlock(&fiq->waitq.lock);
-
-		return;
+		goto unlock;
 	}
 
 	list_add_tail(&req->list, &fc->bg_queue);
 	flush_bg_queue(fc, fiq);
+unlock:
+	spin_unlock(&fc->bg_lock);
 }
 
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
@@ -656,7 +658,7 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
 		spin_unlock(&fc->lock);
 		request_end(fc, req);
 	} else if (fc->connected) {
-		fuse_request_send_background_locked(fc, req);
+		fuse_request_send_background_nocheck(fc, req);
 		spin_unlock(&fc->lock);
 	} else {
 		spin_unlock(&fc->lock);
@@ -2283,7 +2285,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		int i;
 
 		fc->connected = 0;
-		fc->blocked = 0;
 		fuse_set_initialized(fc);
 		list_for_each_entry(fud, &fc->devices, entry) {
 			struct fuse_pqueue *fpq = &fud->pq;
@@ -2308,10 +2309,13 @@ void fuse_abort_conn(struct fuse_conn *fc)
 		if (fc->kio.op)
 			fc->kio.op->conn_abort(fc);
 
+		spin_lock(&fc->bg_lock);
+		fc->blocked = 0;
 		fc->max_background = UINT_MAX;
 		for_each_online_cpu(cpu)
 			flush_bg_queue(fc, per_cpu_ptr(fc->iqs, cpu));
 		flush_bg_queue(fc, &fc->main_iq);
+		spin_unlock(&fc->bg_lock);
 
 		for_each_online_cpu(cpu)
 			fuse_abort_iqueue(per_cpu_ptr(fc->iqs, cpu), &to_end2);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bed0db32c23a..edc314bd7156 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1957,7 +1957,7 @@ __acquires(fc->lock)
 
 	req->in.args[1].size = inarg->size;
 	fi->writectr++;
-	fuse_request_send_background_locked(fc, req);
+	fuse_request_send_background_nocheck(fc, req);
 	return;
 
  out_free:
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index f55c0e9c6e3e..a6eb566f8789 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -603,6 +603,10 @@ struct fuse_conn {
 	/** The list of background requests set aside for later queuing */
 	struct list_head bg_queue;
 
+	/** Protects: max_background, congestion_threshold, num_background,
+	 * active_background, bg_queue, blocked */
+	spinlock_t bg_lock;
+
 	/** Flag indicating that INIT reply has been received. Allocating
 	 * any fuse request will be suspended until the flag is set */
 	int initialized;
@@ -975,8 +979,8 @@ void fuse_request_check_and_send(struct fuse_conn *fc, struct fuse_req *req,
  */
 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req);
 
-void fuse_request_send_background_locked(struct fuse_conn *fc,
-					 struct fuse_req *req);
+void fuse_request_send_background_nocheck(struct fuse_conn *fc,
+					  struct fuse_req *req);
 
 /* Abort all requests */
 void fuse_abort_conn(struct fuse_conn *fc);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9240de624797..6a3f1b40e9a1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -447,7 +447,9 @@ int fuse_invalidate_files(struct fuse_conn *fc, u64 nodeid)
 			spin_unlock(&fiq->waitq.lock);
 		}
 		fuse_kill_requests(fc, inode, &fc->main_iq.pending);
+		spin_lock(&fc->bg_lock);
 		fuse_kill_requests(fc, inode, &fc->bg_queue);
+		spin_unlock(&fc->bg_lock);
 		if (fc->kio.op && fc->kio.op->kill_requests)
 			fc->kio.op->kill_requests(fc, inode);
 
@@ -796,6 +798,7 @@ int fuse_conn_init(struct fuse_conn *fc)
 	int cpu;
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
+	spin_lock_init(&fc->bg_lock);
 	mutex_init(&fc->inst_mutex);
 	init_rwsem(&fc->killsb);
 	atomic_set(&fc->count, 1);
@@ -1053,6 +1056,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
 	sanitize_global_limit(&max_user_bgreq);
 	sanitize_global_limit(&max_user_congthresh);
 
+	spin_lock(&fc->bg_lock);
 	if (arg->max_background) {
 		fc->max_background = arg->max_background;
 
@@ -1066,6 +1070,7 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
 		    fc->congestion_threshold > max_user_congthresh)
 			fc->congestion_threshold = max_user_congthresh;
 	}
+	spin_unlock(&fc->bg_lock);
 }
 
 static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
diff --git a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
index 010f90621df0..e0a8f6364f53 100644
--- a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
+++ b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
@@ -984,10 +984,10 @@ static void pcs_fuse_submit(struct pcs_fuse_cluster *pfc, struct fuse_req *req,
 	DTRACE("do fuse_request_end req:%p op:%d err:%d\n", &r->req, r->req.in.h.opcode, r->req.out.h.error);
 
 	if (lk)
-		spin_unlock(&pfc->fc->lock);
+		spin_unlock(&pfc->fc->bg_lock);
 	request_end(pfc->fc, &r->req);
 	if (lk)
-		spin_lock(&pfc->fc->lock);
+		spin_lock(&pfc->fc->bg_lock);
 	return;
 
 submit:
@@ -1126,10 +1126,10 @@ static int pcs_kio_classify_req(struct fuse_conn *fc, struct fuse_req *req, bool
 		if (!(inarg->valid & FATTR_SIZE))
 			return 1;
 		if (lk)
-			spin_unlock(&fc->lock);
+			spin_unlock(&fc->bg_lock);
 		pcs_kio_setattr_handle(fi, req);
 		if (lk)
-			spin_lock(&fc->lock);
+			spin_lock(&fc->bg_lock);
 		return 1;
 	}
 	case FUSE_IOCTL: {
@@ -1194,10 +1194,10 @@ static int kpcs_req_send(struct fuse_conn* fc, struct fuse_req *req, bool bg, bo
 			__clear_bit(FR_PENDING, &req->flags);
 			req->out.h.error = ret;
 			if (lk)
-				spin_unlock(&fc->lock);
+				spin_unlock(&fc->bg_lock);
 			request_end(fc, req);
 			if (lk)
-				spin_lock(&fc->lock);
+				spin_lock(&fc->bg_lock);
 			return 0;
 		}
 		return 1;
@@ -1207,10 +1207,10 @@ static int kpcs_req_send(struct fuse_conn* fc, struct fuse_req *req, bool bg, bo
 	if (!bg)
 		atomic_inc(&req->count);
 	else if (!lk) {
-		spin_lock(&fc->lock);
+		spin_lock(&fc->bg_lock);
 		if (fc->num_background + 1 >= fc->max_background ||
 		    !fc->connected) {
-			spin_unlock(&fc->lock);
+			spin_unlock(&fc->bg_lock);
 			return 1;
 		}
 		fc->num_background++;
@@ -1221,7 +1221,7 @@ static int kpcs_req_send(struct fuse_conn* fc, struct fuse_req *req, bool bg, bo
 			set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
 			set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
 		}
-		spin_unlock(&fc->lock);
+		spin_unlock(&fc->bg_lock);
 	}
 	__clear_bit(FR_PENDING, &req->flags);
 


