[Devel] [PATCH RHEL8 COMMIT] dm-qcow2: Introduce memcache for qrq

Konstantin Khorenko khorenko at virtuozzo.com
Fri Sep 10 20:19:19 MSK 2021


The commit is pushed to "branch-rh8-4.18.0-305.3.1.vz8.7.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh8-4.18.0-305.3.1.vz8.7.11
------>
commit 5c97641a95ce22954bef387c2d5c9756ab24e9a0
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date:   Fri Sep 10 20:19:19 2021 +0300

    dm-qcow2: Introduce memcache for qrq
    
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
 drivers/md/dm-qcow2-map.c    |  4 ++--
 drivers/md/dm-qcow2-target.c | 20 ++++++++++++++++----
 drivers/md/dm-qcow2.h        |  2 ++
 3 files changed, 20 insertions(+), 6 deletions(-)
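Note (not part of the commit): the diff below switches qrq allocation from a bare
kmalloc(GFP_ATOMIC)/kfree() pair to a kmem_cache created at module init plus a
per-target mempool of reserved elements, with mempool_alloc() in the map path and
mempool_free() on completion. The fragment below is only an illustrative sketch of
that pattern; the identifiers my_cache, my_pool, struct my_rq and MY_POOL_SIZE are
hypothetical stand-ins for qrq_cache, tgt->qrq_pool, struct qcow2_rq (with its
trailing struct qio) and QCOW2_QRQ_POOL_SIZE.

	#include <linux/slab.h>
	#include <linux/mempool.h>

	#define MY_POOL_SIZE 512		/* reserved elements, cf. QCOW2_QRQ_POOL_SIZE */

	struct my_rq { int dummy; };		/* stand-in for qcow2_rq + embedded qio */

	static struct kmem_cache *my_cache;	/* one slab cache per module */
	static mempool_t *my_pool;		/* one pool per target in the real code */

	static int my_init(void)
	{
		my_cache = kmem_cache_create("my-rq", sizeof(struct my_rq), 0, 0, NULL);
		if (!my_cache)
			return -ENOMEM;

		my_pool = mempool_create_slab_pool(MY_POOL_SIZE, my_cache);
		if (!my_pool) {
			kmem_cache_destroy(my_cache);
			return -ENOMEM;
		}
		return 0;
	}

	static struct my_rq *my_map(void)
	{
		/* Atomic context: may dip into the pool's reserved elements */
		return mempool_alloc(my_pool, GFP_ATOMIC);
	}

	static void my_endio(struct my_rq *rq)
	{
		mempool_free(rq, my_pool);	/* return element to the pool */
	}

	static void my_exit(void)
	{
		mempool_destroy(my_pool);	/* NULL-safe, as in the target teardown */
		kmem_cache_destroy(my_cache);
	}

Backing the pool with a slab cache means mempool_alloc(GFP_ATOMIC) can fall back on
the min_nr preallocated elements when the page allocator fails, so the
clone_and_map() path degrades more gracefully under memory pressure than a plain
kmalloc(GFP_ATOMIC); it can still return NULL once that reserve is exhausted.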

diff --git a/drivers/md/dm-qcow2-map.c b/drivers/md/dm-qcow2-map.c
index 5fcfb8bffdcb..19b08fd990d5 100644
--- a/drivers/md/dm-qcow2-map.c
+++ b/drivers/md/dm-qcow2-map.c
@@ -4001,7 +4001,7 @@ static void qrq_endio(struct qcow2_target *tgt, struct qio *unused,
 
 	if (qrq->bvec)
 		kfree(qrq->bvec);
-	kfree(qrq);
+	mempool_free(qrq, tgt->qrq_pool);
 	dm_complete_request(rq, bi_status);
 }
 
@@ -4064,7 +4064,7 @@ int qcow2_clone_and_map(struct dm_target *ti, struct request *rq,
 	struct qcow2_rq *qrq;
 	struct qio *qio;
 
-	qrq = kmalloc(sizeof(*qrq) + sizeof(*qio), GFP_ATOMIC);
+	qrq = mempool_alloc(tgt->qrq_pool, GFP_ATOMIC);
 	if (!qrq)
 		return DM_MAPIO_KILL;
 	init_qrq(qrq, rq);
diff --git a/drivers/md/dm-qcow2-target.c b/drivers/md/dm-qcow2-target.c
index d3a99c26f489..30d4275cc21c 100644
--- a/drivers/md/dm-qcow2-target.c
+++ b/drivers/md/dm-qcow2-target.c
@@ -12,6 +12,8 @@ module_param(kernel_sets_dirty_bit, bool, 0444);
 MODULE_PARM_DESC(kernel_sets_dirty_bit,
 		"Dirty bit is set by kernel, not by userspace");
 
+static struct kmem_cache *qrq_cache;
+
 static void qcow2_set_service_operations(struct dm_target *ti, bool allowed)
 {
 	struct qcow2_target *tgt = to_qcow2_target(ti);
@@ -223,9 +225,11 @@ static void qcow2_tgt_destroy(struct qcow2_target *tgt)
 		flush_deferred_activity_all(tgt);
 		/* Now kill the queue */
 		destroy_workqueue(tgt->wq);
-		mempool_destroy(tgt->qio_pool);
 	}
 
+	mempool_destroy(tgt->qio_pool);
+	mempool_destroy(tgt->qrq_pool);
+
 	for (i = 0; i < 2; i++)
 		percpu_ref_exit(&tgt->inflight_ref[i]);
 
@@ -401,9 +405,11 @@ static struct qcow2_target *alloc_qcow2_target(struct dm_target *ti)
 	tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
 	if (!tgt)
 		return NULL;
+	tgt->qrq_pool = mempool_create_slab_pool(QCOW2_QRQ_POOL_SIZE,
+						 qrq_cache);
 	tgt->qio_pool = mempool_create_kmalloc_pool(MIN_QIOS,
 						    sizeof(struct qio));
-	if (!tgt->qio_pool) {
+	if (!tgt->qrq_pool || !tgt->qio_pool) {
 		ti->error = "Can't create mempool";
 		goto out_target;
 	}
@@ -439,6 +445,7 @@ static struct qcow2_target *alloc_qcow2_target(struct dm_target *ti)
 	destroy_workqueue(tgt->wq);
 out_pool:
 	mempool_destroy(tgt->qio_pool);
+	mempool_destroy(tgt->qrq_pool);
 out_target:
 	kfree(tgt);
 	return NULL;
@@ -721,7 +728,6 @@ static int qcow2_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	tgt = alloc_qcow2_target(ti);
 	if (!tgt)
 		return -ENOMEM;
-
 	/*
 	 * Userspace passes deltas in bottom, ..., top order,
 	 * but we attach it vise versa: from top to bottom.
@@ -915,9 +921,14 @@ static int __init dm_qcow2_init(void)
 {
 	int ret;
 
+	qrq_cache = kmem_cache_create("qcow2-qrq", sizeof(struct qcow2_rq) +
+				      sizeof(struct qio), 0, 0, NULL);
+	if (!qrq_cache)
+		return -ENOMEM;
+
 	ret = dm_register_target(&qcow2_target);
 	if (ret)
-		DMERR("qcow2 target registration failed: %d", ret);
+		kmem_cache_destroy(qrq_cache);
 
 	return ret;
 }
@@ -925,6 +936,7 @@ static int __init dm_qcow2_init(void)
 static void __exit dm_qcow2_exit(void)
 {
 	dm_unregister_target(&qcow2_target);
+	kmem_cache_destroy(qrq_cache);
 }
 
 module_init(dm_qcow2_init);
diff --git a/drivers/md/dm-qcow2.h b/drivers/md/dm-qcow2.h
index d7d668824b74..0dc65f43df9e 100644
--- a/drivers/md/dm-qcow2.h
+++ b/drivers/md/dm-qcow2.h
@@ -104,6 +104,8 @@ struct md_page {
 
 struct qcow2_target {
 	struct dm_target *ti;
+#define QCOW2_QRQ_POOL_SIZE 512 /* Twice nr_requests from blk_mq_init_sched() */
+	mempool_t *qrq_pool;
 	mempool_t *qio_pool;
 	/*
 	 * start_processing_qrq() is the only place during IO handling,
