[Devel] [PATCH RHEL9 COMMIT] fs/fuse kio: avoid acquiring rpc's mutex lock in krpc ioctl operation

Konstantin Khorenko khorenko@virtuozzo.com
Mon Oct 7 20:15:06 MSK 2024


The commit is pushed to "branch-rh9-5.14.0-427.35.1.vz9.76.x-ovz" and will appear at git@bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.35.1.vz9.76.4
------>
commit c6c291e4cfa160ec354a2199dfc34b5ea15ec7d1
Author: Liu Kui <kui.liu@virtuozzo.com>
Date:   Tue Oct 1 13:07:58 2024 +0800

    fs/fuse kio: avoid acquiring rpc's mutex lock in krpc ioctl operation
    
    Acquiring the rpc's mutex lock in the krpc ioctl operation could
    potentially block the calling userspace process for a very long time,
    thus stalling the evloop process in the case of the vstorage client.
    
    https://virtuozzo.atlassian.net/browse/VSTOR-93139
    
    Signed-off-by: Liu Kui <kui.liu@virtuozzo.com>
    Acked-by: Alexey Kuznetsov <kuznet@virtuozzo.com>
---
 fs/fuse/kio/pcs/pcs_cs.c       |  2 +-
 fs/fuse/kio/pcs/pcs_krpc.c     |  2 +-
 fs/fuse/kio/pcs/pcs_rpc.c      | 32 +++++++++++++++++++++++++-------
 fs/fuse/kio/pcs/pcs_rpc.h      |  2 ++
 fs/fuse/kio/pcs/pcs_rpc_clnt.c | 28 ++++++----------------------
 5 files changed, 35 insertions(+), 31 deletions(-)
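
For context: the fix moves the slow, mutex-protected teardown out of the
client close path.  Dropping a client reference now takes only the new
clnt_lock spinlock; the last client marks the rpc PCS_RPC_F_DEAD and queues
the actual abort/destroy onto pcs_cleanup_wq, so the ioctl caller never
sleeps on ep->mutex.  Below is a minimal userspace sketch of this pattern,
with pthreads standing in for the kernel primitives; all names are
illustrative, not from the kernel source, and the refcount juggling the
patch does with pcs_rpc_get()/pcs_rpc_put() around the queued work is
elided.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct endpoint {
	pthread_mutex_t    mutex;     /* heavyweight lock, can be held long */
	pthread_spinlock_t clnt_lock; /* protects nr_clnts and dead */
	int                nr_clnts;
	bool               dead;
};

/* Slow teardown on the worker thread, with the mutex held; this plays
 * the role of rpc_close_work() calling rpc_abort(). */
static void *close_work(void *arg)
{
	struct endpoint *ep = arg;

	pthread_mutex_lock(&ep->mutex);
	sleep(1);                     /* model a long-running abort */
	printf("endpoint destroyed\n");
	pthread_mutex_unlock(&ep->mutex);
	return NULL;
}

/* Fast client close: only the spinlock is taken, so the caller (the
 * ioctl path in the patch) can never sleep behind a long teardown. */
static void clnt_close(struct endpoint *ep, pthread_t *worker)
{
	bool last;

	pthread_spin_lock(&ep->clnt_lock);
	last = (--ep->nr_clnts == 0);
	if (last)
		ep->dead = true;      /* lookups now skip this endpoint */
	pthread_spin_unlock(&ep->clnt_lock);

	if (last)                     /* defer the slow part, as queue_work() does */
		pthread_create(worker, NULL, close_work, ep);
}

int main(void)
{
	struct endpoint ep = { .nr_clnts = 2 };
	pthread_t worker;

	pthread_mutex_init(&ep.mutex, NULL);
	pthread_spin_init(&ep.clnt_lock, PTHREAD_PROCESS_PRIVATE);

	clnt_close(&ep, &worker);     /* first client: returns immediately */
	clnt_close(&ep, &worker);     /* last client: queues the teardown */
	pthread_join(worker, NULL);
	return 0;
}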

diff --git a/fs/fuse/kio/pcs/pcs_cs.c b/fs/fuse/kio/pcs/pcs_cs.c
index 2e16b8dabc91..efa69cd352ab 100644
--- a/fs/fuse/kio/pcs/pcs_cs.c
+++ b/fs/fuse/kio/pcs/pcs_cs.c
@@ -1019,7 +1019,7 @@ static void pcs_cs_destroy(struct pcs_cs *cs)
 
 	if (cs->rpc) {
 		cs->rpc->clnt_cs = NULL;
-		pcs_rpc_clnt_close(cs->rpc);
+		pcs_rpc_close(cs->rpc);
 		cs->rpc = NULL;
 	}
 	call_rcu(&cs->rcu, cs_destroy_rcu);
diff --git a/fs/fuse/kio/pcs/pcs_krpc.c b/fs/fuse/kio/pcs/pcs_krpc.c
index 3bc0d0376761..6ab61455641e 100644
--- a/fs/fuse/kio/pcs/pcs_krpc.c
+++ b/fs/fuse/kio/pcs/pcs_krpc.c
@@ -903,7 +903,7 @@ static void __pcs_krpc_destroy(struct pcs_krpc *krpc)
 
 	if (krpc->rpc) {
 		krpc->rpc->clnt_krpc = NULL;
-		pcs_rpc_clnt_close(krpc->rpc);
+		pcs_rpc_close(krpc->rpc);
 		krpc->rpc = NULL;
 	}
 	pcs_krpc_put(krpc);
diff --git a/fs/fuse/kio/pcs/pcs_rpc.c b/fs/fuse/kio/pcs/pcs_rpc.c
index a4f3ae1fdf5b..79254431592a 100644
--- a/fs/fuse/kio/pcs/pcs_rpc.c
+++ b/fs/fuse/kio/pcs/pcs_rpc.c
@@ -265,21 +265,37 @@ void rpc_abort(struct pcs_rpc * ep, int fatal, int error)
 	pcs_rpc_put(ep);
 }
 
-/* Client close. */
-void pcs_rpc_close(struct pcs_rpc * ep)
+static void rpc_close_work(struct work_struct *w)
 {
-	TRACE("pcs_rpc_close");
-	mutex_lock(&ep->mutex);
-	BUG_ON(ep->flags & PCS_RPC_F_DEAD);
-	BUG_ON(ep->flags & PCS_RPC_F_PASSIVE);
+	struct pcs_rpc *ep = container_of(w, struct pcs_rpc, close_work);
 
-	ep->flags |= PCS_RPC_F_DEAD;
+	mutex_lock(&ep->mutex);
 	rpc_abort(ep, 1, PCS_ERR_NET_ABORT);
 	ep->state = PCS_RPC_DESTROY;
 	mutex_unlock(&ep->mutex);
 
 	pcs_rpc_put(ep);
+}
+
+/* Client close. */
+void pcs_rpc_close(struct pcs_rpc *ep)
+{
+	TRACE("pcs_rpc_close, nr_clnts=%d", ep->nr_clnts);
+
+	spin_lock(&ep->clnt_lock);
+	BUG_ON(ep->flags & PCS_RPC_F_DEAD);
 
+	ep->nr_clnts--;
+	if (!ep->nr_clnts) {
+		/* close the rpc if we're the last rpc client */
+		ep->flags |= PCS_RPC_F_DEAD;
+		memset(&ep->peer_id, 0, sizeof(PCS_NODE_ID_T));
+		pcs_rpc_get(ep);
+		queue_work(pcs_cleanup_wq, &ep->close_work);
+	}
+	spin_unlock(&ep->clnt_lock);
+
+	pcs_rpc_put(ep);
 }
 
 void pcs_rpc_attach_new_ep(struct pcs_rpc * ep, struct pcs_rpc_engine * eng)
@@ -300,6 +316,7 @@ void pcs_rpc_attach_new_ep(struct pcs_rpc * ep, struct pcs_rpc_engine * eng)
 	INIT_LIST_HEAD(&ep->lru_link);
 
 	spin_lock_init(&ep->q_lock);
+	spin_lock_init(&ep->clnt_lock);
 	mutex_init(&ep->mutex);
 	ep->accounted = 0;
 	ep->netlat_min = ~0U;
@@ -1000,6 +1017,7 @@ void pcs_rpc_configure_new_ep(struct pcs_rpc * ep, struct pcs_rpc_params *parm,
 	ep->kill_arrow = 0;
 
 	INIT_WORK(&ep->work, rpc_queue_work);
+	INIT_WORK(&ep->close_work, rpc_close_work);
 	INIT_DELAYED_WORK(&ep->timer_work, timer_work);
 	INIT_DELAYED_WORK(&ep->calendar_work, calendar_work);
 
diff --git a/fs/fuse/kio/pcs/pcs_rpc.h b/fs/fuse/kio/pcs/pcs_rpc.h
index 9a651a812cf7..613b711c7822 100644
--- a/fs/fuse/kio/pcs/pcs_rpc.h
+++ b/fs/fuse/kio/pcs/pcs_rpc.h
@@ -153,9 +153,11 @@ struct pcs_rpc
 	struct hlist_head	kill_calendar[RPC_MAX_CALENDAR];
 	struct llist_node	cleanup_node;
 
+	spinlock_t	clnt_lock;	/* protects the following fields */
 	struct pcs_cs		*clnt_cs;
 	struct pcs_krpc		*clnt_krpc;
 	int nr_clnts;
+	struct work_struct  close_work;
 };
 
 struct pcs_rpc_engine
diff --git a/fs/fuse/kio/pcs/pcs_rpc_clnt.c b/fs/fuse/kio/pcs/pcs_rpc_clnt.c
index bd1eec849abc..cfc81c053254 100644
--- a/fs/fuse/kio/pcs/pcs_rpc_clnt.c
+++ b/fs/fuse/kio/pcs/pcs_rpc_clnt.c
@@ -136,10 +136,10 @@ struct pcs_rpc *pcs_rpc_clnt_create(struct pcs_rpc_engine *eng, PCS_NODE_ID_T *p
 	spin_unlock(&eng->lock);
 
 	if (ep) {
-		mutex_lock(&ep->mutex);
-		if (ep->state != PCS_RPC_DESTROY)
+		spin_lock(&ep->clnt_lock);
+		if (!(ep->flags & PCS_RPC_F_DEAD))
 			goto found;
-		mutex_unlock(&ep->mutex);
+		spin_unlock(&ep->clnt_lock);
 		pcs_rpc_put(ep);
 	}
 
@@ -159,28 +159,12 @@ struct pcs_rpc *pcs_rpc_clnt_create(struct pcs_rpc_engine *eng, PCS_NODE_ID_T *p
 	else
 		ep->flags &= ~PCS_RPC_F_LOCAL;
 
-	mutex_lock(&ep->mutex);
+	spin_lock(&ep->clnt_lock);
+
 found:
 	ep->nr_clnts++;
-	mutex_unlock(&ep->mutex);
+	spin_unlock(&ep->clnt_lock);
 
 	return ep;
 }
 
-void pcs_rpc_clnt_close(struct pcs_rpc *ep)
-{
-	mutex_lock(&ep->mutex);
-	BUG_ON(ep->flags & PCS_RPC_F_DEAD);
-	BUG_ON(ep->flags & PCS_RPC_F_PASSIVE);
-
-	ep->nr_clnts--;
-	if (!ep->nr_clnts) {
-		/* close the rpc if we're the last rpc client */
-		ep->flags |= PCS_RPC_F_DEAD;
-		rpc_abort(ep, 1, PCS_ERR_NET_ABORT);
-		ep->state = PCS_RPC_DESTROY;
-	}
-	mutex_unlock(&ep->mutex);
-
-	pcs_rpc_put(ep);
-}
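
The pcs_rpc_clnt_create() side mirrors this: whether a cached rpc can be
reused is now decided by testing PCS_RPC_F_DEAD under clnt_lock rather than
comparing ep->state under ep->mutex.  Continuing the userspace sketch above
(names again illustrative):

/* Reuse a cached endpoint only while it is still alive; a dead one is
 * left to its queued teardown and the caller allocates a fresh one. */
static struct endpoint *clnt_reuse(struct endpoint *cached)
{
	pthread_spin_lock(&cached->clnt_lock);
	if (!cached->dead) {
		cached->nr_clnts++;   /* joined as another client */
		pthread_spin_unlock(&cached->clnt_lock);
		return cached;
	}
	pthread_spin_unlock(&cached->clnt_lock);
	return NULL;                  /* allocate-new path elided */
}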

