[Devel] [PATCH VZ9] fs/fuse/kio: fix unwarranted warning due to flush request race
    Liu Kui 
    kui.liu at virtuozzo.com
       
    Wed Oct 15 14:07:58 MSK 2025
    
    
  
The warning in pcs_map_queue_resolve() was triggered not by an actual
bug, but by a race in the processing of flush requests originating from
the timer.
Straighten out the map resolve process flow to fix the race by avoiding
repeated lock/unlock of the map lock across several functions.
Additionally, promote the warning condition in pcs_map_queue_resolve() to
an assertion in map_submit() to trigger a coredump if the condition truly
occurs.
Related to: #VSTOR-116825
https://virtuozzo.atlassian.net/browse/VSTOR-116825
Signed-off-by: Liu Kui <kui.liu at virtuozzo.com>
---
 fs/fuse/kio/pcs/pcs_fuse_kdirect.c | 11 ----
 fs/fuse/kio/pcs/pcs_map.c          | 91 +++++++++++++++---------------
 2 files changed, 44 insertions(+), 58 deletions(-)
diff --git a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
index 5444fb8aeb05..1031c43bbec7 100644
--- a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
+++ b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
@@ -601,22 +601,11 @@ int fuse_map_resolve(struct pcs_map_entry *m, int direction)
 	size_t map_sz;
 	int err;
 
-	spin_lock(&m->lock);
-
-	if (m->state & PCS_MAP_DEAD) {
-		spin_unlock(&m->lock);
-		pcs_map_put(m);
-		return 0;
-	}
 	di = pcs_dentry_from_mapping(m->mapping);
 	fm = get_fuse_mount(&di->inode->inode);
 
 	DTRACE("enter m: " MAP_FMT ", dir:%d \n", MAP_ARGS(m),	direction);
 
-	BUG_ON(!(m->state & PCS_MAP_RESOLVING));
-
-	spin_unlock(&m->lock);
-
 	map_sz = sizeof(*map_ioc) + PCS_MAX_CS_CNT * sizeof(struct pcs_cs_info);
 	map_ioc = kzalloc(map_sz, GFP_NOIO);
 	if (!map_ioc)
diff --git a/fs/fuse/kio/pcs/pcs_map.c b/fs/fuse/kio/pcs/pcs_map.c
index 62b083b4947f..8baff296aaa7 100644
--- a/fs/fuse/kio/pcs/pcs_map.c
+++ b/fs/fuse/kio/pcs/pcs_map.c
@@ -1266,23 +1266,14 @@ void pcs_map_complete(struct pcs_map_entry *m, struct pcs_ioc_getmap *omap)
 
 /* Atomically schedule map resolve and push ireq to wait completion */
 static void pcs_map_queue_resolve(struct pcs_map_entry * m, struct pcs_int_request *ireq, int direction)
+__releases(m->lock)
 {
 	LIST_HEAD(l);
 	int ret;
 
-	DTRACE("enter m:%p, state:%x, ireq:%p dir:%d \n", m, m->state, ireq, direction);
-
-	spin_lock(&m->lock);
-	/* This should not happen unless aio_dio/fsync vs truncate race */
-	if (m->state & PCS_MAP_DEAD) {
-		spin_unlock(&m->lock);
+	assert_spin_locked(&m->lock);
 
-		/* If this happens, it's assumed this is a bug that needs to be fixed */
-		WARN_ON_ONCE(1);
-		list_add(&ireq->list, &l);
-		pcs_ireq_queue_fail(&l, PCS_ERR_NET_ABORT);
-		return;
-	}
+	DTRACE("enter m:%p, state:%x, ireq:%p dir:%d \n", m, m->state, ireq, direction);
 	DTRACE("dentry: "DENTRY_FMT, DENTRY_ARGS(pcs_dentry_from_map(m)));
 	DTRACE("%p {%p %p}\n",ireq,  ireq->list.next, ireq->list.prev);
 	BUG_ON(!list_empty(&ireq->list));
@@ -1293,8 +1284,7 @@ static void pcs_map_queue_resolve(struct pcs_map_entry * m, struct pcs_int_reque
 		return;
 	}
 	/* If converting a hole, adjust res_offset */
-	if (direction && !m->cs_list && !(m->state & PCS_MAP_RESOLVING)
-	    && ireq->type == PCS_IREQ_IOCHUNK)
+	if (direction && !m->cs_list && (ireq->type == PCS_IREQ_IOCHUNK))
 		m->res_offset = ireq->iochunk.chunk + ireq->iochunk.offset;
 
 	m->state |= PCS_MAP_RESOLVING;
@@ -1307,8 +1297,10 @@ static void pcs_map_queue_resolve(struct pcs_map_entry * m, struct pcs_int_reque
 	if (ret) {
 		TRACE("map error: %d for " MAP_FMT "\n", ret, MAP_ARGS(m));
 		spin_lock(&m->lock);
-		pcs_map_truncate(m, &l);
-		map_del_lru(m);
+		if (!(m->state & PCS_MAP_DEAD)) {
+			pcs_map_truncate(m, &l);
+			map_del_lru(m);
+		}
 		spin_unlock(&m->lock);
 		pcs_ireq_queue_fail(&l, PCS_ERR_NOMEM);
 		pcs_map_put(m);
@@ -1582,37 +1574,34 @@ static void pcs_cs_wakeup(struct pcs_cs * cs)
 
 		sreq->flags |= IREQ_F_REQUEUED;
 
-		if (sreq->type != PCS_IREQ_FLUSH) {
-			FUSE_KDTRACE(sreq->cc->fc,
-				     "wakeup {%p} cpu%u %d %u/%u " DENTRY_FMT " %llu+%llu",
-				     sreq, smp_processor_id(), cs->cong_queue_len,
-				     cs->in_flight, cs->eff_cwnd, DENTRY_ARGS(sreq->dentry),
-				     sreq->iochunk.chunk + sreq->iochunk.offset,
-				     sreq->iochunk.size);
-			map = pcs_find_get_map(sreq->dentry, sreq->iochunk.chunk +
-						   ((sreq->flags & IREQ_F_MAPPED) ? 0 : sreq->iochunk.offset));
-			if (map) {
-				if (sreq->iochunk.map)
-					pcs_map_put(sreq->iochunk.map);
-				sreq->iochunk.map = map;
-				if (sreq->iochunk.flow) {
-					struct pcs_int_request * preq = sreq->completion_data.parent;
-
-					pcs_flow_confirm(sreq->iochunk.flow, &map->mapping->ftab, preq->apireq.req->type == PCS_REQ_T_WRITE,
-							 preq->apireq.req->pos, preq->apireq.req->size,
-							 &sreq->cc->maps.ftab);
-				}
-				map_submit(map, sreq);
-			} else {
-				map_queue_on_limit(sreq);
+		if (sreq->type == PCS_IREQ_FLUSH) {
+			map = sreq->flushreq.map;
+			map_submit(map, sreq);
+			continue;
+		}
+
+		FUSE_KDTRACE(sreq->cc->fc,
+			     "wakeup {%p} cpu%u %d %u/%u " DENTRY_FMT " %llu+%llu",
+			     sreq, smp_processor_id(), cs->cong_queue_len,
+			     cs->in_flight, cs->eff_cwnd, DENTRY_ARGS(sreq->dentry),
+			     sreq->iochunk.chunk + sreq->iochunk.offset,
+			     sreq->iochunk.size);
+		map = pcs_find_get_map(sreq->dentry, sreq->iochunk.chunk +
+					   ((sreq->flags & IREQ_F_MAPPED) ? 0 : sreq->iochunk.offset));
+		if (map) {
+			if (sreq->iochunk.map)
+				pcs_map_put(sreq->iochunk.map);
+			sreq->iochunk.map = map;
+			if (sreq->iochunk.flow) {
+				struct pcs_int_request * preq = sreq->completion_data.parent;
+
+				pcs_flow_confirm(sreq->iochunk.flow, &map->mapping->ftab, preq->apireq.req->type == PCS_REQ_T_WRITE,
+						 preq->apireq.req->pos, preq->apireq.req->size,
+						 &sreq->cc->maps.ftab);
 			}
+			map_submit(map, sreq);
 		} else {
-			map = sreq->flushreq.map;
-			if (map->state & PCS_MAP_DEAD) {
-				pcs_clear_error(&sreq->error);
-				ireq_complete(sreq);
-			} else
-				map_submit(map, sreq);
+			map_queue_on_limit(sreq);
 		}
 	}
 }
@@ -2474,12 +2463,20 @@ void map_submit(struct pcs_map_entry * m, struct pcs_int_request *ireq)
 		if (ireq->type == PCS_IREQ_IOCHUNK && !(ireq->flags & IREQ_F_MAPPED))
 			ireq->iochunk.hbuf.map_version = m->version;
 
-		if (!(m->state & (1 << direction)) || (m->state & PCS_MAP_DEAD) ||
-		    map_chk_stale(m)) {
+		if (m->state & PCS_MAP_DEAD) {
+			/* This should not happen unless aio_dio/fsync vs truncate race */
+			BUG_ON(ireq->type != PCS_IREQ_FLUSH);
 			spin_unlock(&m->lock);
+			pcs_clear_error(&ireq->error);
+			ireq_complete(ireq);
+			return;
+		}
+
+		if (!(m->state & (1 << direction)) || map_chk_stale(m)) {
 			pcs_map_queue_resolve(m, ireq, direction);
 			return;
 		}
+
 		DTRACE("enter m: " MAP_FMT ", ireq:%p \n", MAP_ARGS(m),	 ireq);
 
 		csl = m->cs_list;
-- 
2.39.5 (Apple Git-154)
    
    
More information about the Devel
mailing list