[Devel] [PATCH RHEL9 COMMIT] dm-qcow2: use kvmalloc for bvec allocation during rq splitting

Konstantin Khorenko khorenko at virtuozzo.com
Fri Jun 27 13:28:01 MSK 2025


The commit is pushed to "branch-rh9-5.14.0-427.55.1.vz9.82.x-ovz" and will appear at git at bitbucket.org:openvz/vzkernel.git
after rh9-5.14.0-427.55.1.vz9.82.1
------>
commit 2db18f05cecc95dd4099a0a3cfeb480c4974bc53
Author: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
Date:   Fri Jun 27 11:07:13 2025 +0200

    dm-qcow2: use kvmalloc for bvec allocation during rq splitting
    
    We have seen up to 32768 bios in a single request, and for such
    large allocations kmalloc with GFP_NOIO simply fails on a loaded
    system.
    
    [ 1351.057243] kworker/u9:8: page allocation failure: order:6, mode:0x40c00(GFP_NOIO|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0
    [ 1351.058056] CPU: 2 PID: 12720 Comm: kworker/u9:8 ve: / Kdump: loaded Tainted: G               X  -------  ---  5.14.0-427.44.1.vz9.80.39 #1 80.39
    [ 1351.058543] Hardware name: Acronis OpenStack Compute/Virtuozzo, BIOS 1.16.1-1.vz9.2 04/01/2014
    [ 1351.058936] Workqueue: dm-qcow2 do_qcow2_work [dm_qcow2]
    [ 1351.059214] Call Trace:
    [ 1351.059390]  <TASK>
    [ 1351.059560]  dump_stack_lvl+0x34/0x48
    [ 1351.059762]  warn_alloc+0x138/0x160
    [ 1351.059982]  ? __alloc_pages_direct_compact+0xa7/0x280
    [ 1351.060259]  __alloc_pages_slowpath.constprop.0+0x76f/0x7e0
    [ 1351.060543]  __alloc_pages+0x3bb/0x3f0
    [ 1351.060759]  ? create_bvec_from_rq+0x8f/0x130 [dm_qcow2]
    [ 1351.061011]  __kmalloc_large_node+0x79/0x100
    [ 1351.061231]  __kmalloc+0xca/0x140
    [ 1351.061430]  create_bvec_from_rq+0x8f/0x130 [dm_qcow2]
    [ 1351.061679]  do_qcow2_work+0x3cd/0xbc0 [dm_qcow2]
    [ 1351.061925]  ? trigger_cpulimit_balance+0x6b/0x180
    [ 1351.062167]  ? _raw_spin_unlock+0xa/0x30
    [ 1351.062391]  ? finish_task_switch.isra.0+0x8c/0x2a0
    [ 1351.062625]  process_one_work+0x1e5/0x3b0
    [ 1351.062838]  worker_thread+0x50/0x3a0
    [ 1351.063044]  ? __pfx_worker_thread+0x10/0x10
    [ 1351.063321]  kthread+0xe0/0x100
    [ 1351.063516]  ? __pfx_kthread+0x10/0x10
    [ 1351.063713]  ret_from_fork+0x2c/0x50
    [ 1351.063935]  </TASK>
    
    Also fix a similar allocation in qcow2_rw_pages_sync().
    
    https://virtuozzo.atlassian.net/browse/VSTOR-109363
    Signed-off-by: Andrey Zhadchenko <andrey.zhadchenko at virtuozzo.com>
    
    Feature: dm-qcow2: block device over QCOW2 files driver
---
 drivers/md/dm-qcow2-map.c    | 6 +++---
 drivers/md/dm-qcow2-target.c | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/md/dm-qcow2-map.c b/drivers/md/dm-qcow2-map.c
index f7cb036bb416e..80552e2ab7342 100644
--- a/drivers/md/dm-qcow2-map.c
+++ b/drivers/md/dm-qcow2-map.c
@@ -3501,8 +3501,8 @@ static struct bio_vec *create_bvec_from_rq(struct request *rq)
 	rq_for_each_bvec(bv, rq, rq_iter)
 		nr_bvec++;
 
-	bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
-			     GFP_NOIO);
+	bvec = kvmalloc_array(nr_bvec, sizeof(struct bio_vec),
+			      GFP_NOIO);
 	if (!bvec)
 		goto out;
 
@@ -4188,7 +4188,7 @@ static void qrq_endio(struct qcow2_target *tgt, struct qio *qio,
 	struct request *rq = qrq->rq;
 
 	if (qrq->bvec)
-		kfree(qrq->bvec);
+		kvfree(qrq->bvec);
 	/*
 	 * Here is exit point for rq, and here we handle ENOSPC.
 	 * Embedded qios will be reinitialized like they've just
diff --git a/drivers/md/dm-qcow2-target.c b/drivers/md/dm-qcow2-target.c
index 6e2e583ba0b8b..e4fb1f9359105 100644
--- a/drivers/md/dm-qcow2-target.c
+++ b/drivers/md/dm-qcow2-target.c
@@ -51,7 +51,7 @@ static int qcow2_rw_pages_sync(unsigned int rw, struct qcow2 *qcow2,
 
 	bvec = &bvec_on_stack;
 	if (nr != 1)
-		bvec = kmalloc(nr * sizeof(*bvec), GFP_NOIO);
+		bvec = kvmalloc(nr * sizeof(*bvec), GFP_NOIO);
 	if (!bvec)
 		return -ENOMEM;
 
@@ -81,7 +81,7 @@ static int qcow2_rw_pages_sync(unsigned int rw, struct qcow2 *qcow2,
 	}
 
 	if (bvec != &bvec_on_stack)
-		kfree(bvec);
+		kvfree(bvec);
 	return ret;
 }
 ALLOW_ERROR_INJECTION(qcow2_rw_pages_sync, ERRNO);


More information about the Devel mailing list