[Devel] [PATCH rh7 10/12] ploop: Set up discard limits
Kirill Tkhai
ktkhai at virtuozzo.com
Fri Mar 1 18:14:15 MSK 2019
Logically neighbouring blocks usually are not physical
neighbours, so we may issue a discard only for a single
block at a time. Thus, we request the block layer to split
discard requests into (1 << (cluster_log + 9))-byte bios.
This is the basis; later we may introduce performance
improvements for corner cases on top of it.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
drivers/block/ploop/dev.c | 6 +++---
drivers/block/ploop/io_direct.c | 1 +
drivers/block/ploop/io_kaio.c | 5 +----
include/linux/ploop/ploop.h | 11 +++++++++++
4 files changed, 16 insertions(+), 7 deletions(-)
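For reference, a minimal standalone sketch of the size arithmetic
mentioned above: cluster_log is the cluster size expressed as a
power-of-two number of 512-byte sectors, so one cluster is
(1 << (cluster_log + 9)) bytes. The value used for cluster_log here
is an illustrative assumption, and this is plain userspace C, not
ploop code:

/*
 * Standalone sketch, not part of the patch: shows how cluster_log
 * (a count of 512-byte sectors) maps to the discard limits.
 */
#include <stdio.h>

int main(void)
{
	unsigned int cluster_log = 11;	/* assumed example: 2048 sectors */
	unsigned long long cluster_bytes = 1ULL << (cluster_log + 9);
	unsigned int cluster_sectors = 1u << cluster_log;

	printf("discard granularity: %llu bytes (%u sectors)\n",
	       cluster_bytes, cluster_sectors);
	return 0;
}

With cluster_log = 11 this prints 1048576 bytes (2048 sectors),
i.e. a 1 MiB cluster.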
diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 9ac436201b1b..bf508a9b1bc8 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -4026,10 +4026,10 @@ static int ploop_start(struct ploop_device * plo, struct block_device *bdev)
blk_queue_merge_bvec(q, ploop_merge_bvec);
blk_queue_flush(q, REQ_FLUSH);
- if (top_delta->io.ops->queue_settings)
- top_delta->io.ops->queue_settings(&top_delta->io, q);
+ top_delta->io.ops->queue_settings(&top_delta->io, q);
+ /* REQ_WRITE_SAME is not supported */
+ blk_queue_max_write_same_sectors(q, 0);
- blk_queue_max_discard_sectors(q, INT_MAX);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_clear_unlocked(QUEUE_FLAG_STANDBY, q);
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index b0d7095864fc..18563eaa4f1a 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -1776,6 +1776,7 @@ static int dio_congested(struct ploop_io * io, int bits)
static void dio_queue_settings(struct ploop_io * io, struct request_queue * q)
{
+ ploop_set_discard_limits(io->plo);
blk_queue_stack_limits(q, bdev_get_queue(io->files.bdev));
}
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index f5d9936d4cd4..84da1cec5643 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -1078,10 +1078,7 @@ static void kaio_unplug(struct ploop_io * io)
static void kaio_queue_settings(struct ploop_io * io, struct request_queue * q)
{
blk_set_stacking_limits(&q->limits);
- blk_queue_max_write_same_sectors(q, 0);
-
- q->limits.discard_alignment = PAGE_SIZE;
- q->limits.discard_granularity = PAGE_SIZE;
+ ploop_set_discard_limits(io->plo);
}
static void kaio_issue_flush(struct ploop_io * io, struct ploop_request *preq)
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index 71a55573bdf6..b209c9f096a8 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -853,6 +853,17 @@ static inline bool whole_block(struct ploop_device * plo, struct ploop_request *
return !(preq->req_sector & (cluster_size_in_sec(plo) - 1));
}
+static inline void ploop_set_discard_limits(struct ploop_device *plo)
+{
+ struct request_queue *q = plo->queue;
+ /*
+ * In the PLOOP_FMT_PLOOP1 format, neighbouring virtual clusters
+ * are not neighbours on the backing device, so we expect the block
+ * subsystem to split discards into single-cluster requests.
+ */
+ q->limits.discard_granularity = cluster_size_in_bytes(plo);
+ q->limits.max_discard_sectors = (1 << plo->cluster_log);
+}
struct map_node;
int ploop_fastmap(struct ploop_map * map, cluster_t block, iblock_t *result);
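Below is a rough userspace model of the splitting behaviour the queue
limits above are expected to trigger. It is only a sketch of what the
block layer is assumed to do once max_discard_sectors equals one
cluster; the function name, the cluster_log value and the request
sizes are made up for illustration:

/*
 * Illustrative model only, not kernel code: split a discard request
 * into chunks no larger than max_discard_sectors, the way the block
 * layer is expected to split discards for ploop.
 */
#include <stdio.h>

static void split_discard(unsigned long long start, unsigned long long nr,
			  unsigned int max_discard_sectors)
{
	while (nr) {
		unsigned long long chunk = nr < max_discard_sectors ?
					   nr : max_discard_sectors;

		printf("discard bio: sector %llu, %llu sectors\n", start, chunk);
		start += chunk;
		nr -= chunk;
	}
}

int main(void)
{
	unsigned int cluster_log = 11;	/* assumed: 2048-sector clusters */

	/* Discard 2.5 clusters: expect three bios, the last one partial. */
	split_discard(0, 5 * (1ULL << cluster_log) / 2, 1u << cluster_log);
	return 0;
}

With these assumed values this prints bios of 2048, 2048 and 1024
sectors, matching the one-cluster-per-discard expectation described
in the ploop.h comment.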