[Devel] [PATCH 09/10] ploop: Fallocate cluster in cached_submit() during hole reuse
Kirill Tkhai
ktkhai at virtuozzo.com
Tue Mar 26 18:19:23 MSK 2019
__map_extent_bmap() is meant for the raw format, where we have no
information about whether a cluster is present.
Ploop1 must allocate all the space at the beginning of the
cached_submit() function; otherwise we cannot control what is going on.
Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
---
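Note (not part of the patch): below is a minimal userspace sketch of the
fallocate(2) behaviour that the hole-reuse path in cached_submit() depends
on: punch a hole with FALLOC_FL_PUNCH_HOLE, then explicitly re-fallocate
the same cluster. The file name, cluster size and offsets are made up for
illustration only.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	off_t clu_siz = 1 << 20;            /* made-up 1M "cluster" */
	off_t pos = 4 * clu_siz;            /* made-up cluster offset */
	int fd = open("image.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0) {
		perror("open");
		exit(1);
	}

	/* Allocate 8 "clusters" of real space. */
	if (fallocate(fd, 0, 0, 8 * clu_siz) ||
	    /* Discard one cluster: file size unchanged, blocks released. */
	    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      pos, clu_siz) ||
	    /* Reuse the hole: explicitly allocate exactly that cluster. */
	    fallocate(fd, 0, pos, clu_siz)) {
		perror("fallocate");
		exit(1);
	}

	close(fd);
	return 0;
}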
drivers/block/ploop/io_direct.c | 27 +++++++++++++++++++++++----
drivers/block/ploop/io_direct_map.c | 11 +++++++++++
2 files changed, 34 insertions(+), 4 deletions(-)
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index ad95cce91f6e..1667989c9c3b 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -389,21 +389,29 @@ cached_submit(struct ploop_io *io, iblock_t iblk, struct ploop_request * preq,
loff_t pos, end_pos, start, end;
loff_t clu_siz = cluster_size_in_bytes(plo);
struct bio_iter biter;
- loff_t new_size;
+ loff_t new_size, prealloc;
loff_t used_pos;
bool may_fallocate = dio_may_fallocate(io);
+ bool reusing, once = true;
trace_cached_submit(preq);
pos = (loff_t)iblk << (plo->cluster_log + 9);
end_pos = pos + clu_siz;
used_pos = (loff_t)(io->alloc_head - 1) << (io->plo->cluster_log + 9);
+ reusing = (end_pos <= used_pos);
+
+ if (reusing) {
+ /* Reusing a hole */
+ prealloc = clu_siz;
+ goto try_again;
+ }
file_start_write(io->files.file);
- if (use_prealloc && end_pos > used_pos && may_fallocate) {
+ if (use_prealloc && (end_pos > used_pos) && may_fallocate) {
if (unlikely(io->prealloced_size < used_pos + clu_siz)) {
- loff_t prealloc = end_pos;
+ prealloc = end_pos;
if (prealloc > PLOOP_MAX_PREALLOC(plo))
prealloc = PLOOP_MAX_PREALLOC(plo);
try_again:
@@ -423,7 +431,8 @@ cached_submit(struct ploop_io *io, iblock_t iblk, struct ploop_request * preq,
if (err)
goto end_write;
- io->prealloced_size = pos + prealloc;
+ if (io->prealloced_size < pos + prealloc)
+ io->prealloced_size = pos + prealloc;
}
}
@@ -434,6 +443,16 @@ cached_submit(struct ploop_io *io, iblock_t iblk, struct ploop_request * preq,
if (unlikely(IS_ERR(em))) {
err = PTR_ERR(em);
+ if (err == -ENOENT && once) {
+ /*
+ * Boundary cluster: temporary crutch
+ * before io->alloc_head is reworked
+ * to not be incremented in caller.
+ */
+ once = false;
+ prealloc = clu_siz;
+ goto try_again;
+ }
goto end_write;
}
diff --git a/drivers/block/ploop/io_direct_map.c b/drivers/block/ploop/io_direct_map.c
index 7934bc8ecfb1..84a3986b8769 100644
--- a/drivers/block/ploop/io_direct_map.c
+++ b/drivers/block/ploop/io_direct_map.c
@@ -703,7 +703,18 @@ static struct extent_map *__map_extent_bmap(struct ploop_io *io,
}
if (fieinfo.fi_extents_mapped != 1) {
+ struct ploop_device *plo = io->plo;
ploop_extent_put(em);
+ /*
+ * In case of io_direct we may support discards
+ * in multi-delta case, since all allocated blocks
+ * are added to extent tree. But we follow generic
+ * way, and encode discarded blocks by zeroing
+ * their indexes in maps (ploop1).
+ */
+ if (!test_bit(PLOOP_MAP_IDENTICAL, &plo->map.flags))
+ return ERR_PTR(-ENOENT);
+
ret = fallocate_cluster(io, inode, start_off, len, align_to_clu);
if (!ret)
goto again;
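
Note (not part of the patch): a userspace sketch of the FIEMAP query that
the "fi_extents_mapped != 1" test in the io_direct_map.c hunk above is
built on. It asks whether a cluster-sized range of a file is backed by
exactly one extent; the file name and sizes are made up for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int cluster_mapped_as_one_extent(int fd, __u64 pos, __u64 len)
{
	/* struct fiemap ends in a flexible extent array; reserve room for 2. */
	struct {
		struct fiemap fm;
		struct fiemap_extent ext[2];
	} req;

	memset(&req, 0, sizeof(req));
	req.fm.fm_start = pos;
	req.fm.fm_length = len;
	req.fm.fm_flags = FIEMAP_FLAG_SYNC;
	req.fm.fm_extent_count = 2;

	if (ioctl(fd, FS_IOC_FIEMAP, &req.fm) < 0)
		return -1;

	return req.fm.fm_mapped_extents == 1;
}

int main(void)
{
	int fd = open("image.tmp", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	printf("single extent: %d\n",
	       cluster_mapped_as_one_extent(fd, 4 << 20, 1 << 20));
	close(fd);
	return 0;
}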