[Devel] [PATCH RHEL7 COMMIT] ploop: give aligned regions into fuse fallocate()

Konstantin Khorenko khorenko at virtuozzo.com
Thu Feb 15 17:34:48 MSK 2018


The commit is pushed to "branch-rh7-3.10.0-693.17.1.vz7.43.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-693.17.1.vz7.43.5
------>
commit 05026ad794b6f7012bb9e4e861a1f74b199750de
Author: Andrei Vagin <avagin at openvz.org>
Date:   Thu Feb 15 17:34:48 2018 +0300

    ploop: give aligned regions into fuse fallocate()
    
    fuse fallocate() can have granularity and alignment requirements for
    regions. For example, vstorage requires that all regions be aligned
    to 4096 bytes.
    
    A block device advertises optimal granularity and alignment values for
    discard requests, but it still has to handle unaligned requests.
    
    Each block device also has the discard_zeroes_data attribute, which
    says whether the device returns zeros for discarded blocks or not.
    
    If it is set to 1, the unaligned parts of a request have to be filled
    with zeros; otherwise they can simply be ignored.
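    
    For example, with alignment = granularity = 4096, a discard of the
    byte range [1000, 11000) is split into a zero-filled head
    [1000, 4096), an aligned hole [4096, 8192) handed to fallocate(), and
    a zero-filled tail [8192, 11000).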
    
    Signed-off-by: Andrei Vagin <avagin at openvz.org>
---
 drivers/block/ploop/io_kaio.c | 80 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 80 insertions(+)
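
For reference, a minimal userspace sketch of the split that
preprocess_discard_req() below performs, using the example range from the
commit message (ROUND_UP()/ROUND_DOWN() are local stand-ins for the
kernel's round_up()/round_down() helpers; this is only an illustration,
not part of the patch):

	#include <stdio.h>

	#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
	#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

	int main(void)
	{
		unsigned long long alignment = 4096, granularity = 4096;
		unsigned long long off = 1000, size = 10000;
		unsigned long long off_align, size_align;

		/* unaligned head: rounded up to the alignment, zero-filled */
		off_align = ROUND_UP(off, alignment);
		printf("zero head: [%llu, %llu)\n", off, off_align);
		size -= off_align - off;
		off = off_align;

		/* aligned middle: the part handed to fallocate() */
		size_align = ROUND_DOWN(size, granularity);
		printf("hole:      [%llu, %llu)\n", off, off + size_align);

		/* unaligned tail: zero-filled as well */
		printf("zero tail: [%llu, %llu)\n",
		       off + size_align, off + size);
		return 0;
	}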

diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index 22037f143151..998ad7454a66 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -246,6 +246,80 @@ static size_t kaio_kreq_pack(struct kaio_req *kreq, int *nr_segs,
 	return copy;
 }
 
+static int kaio_fill_zero_submit(struct file *file,
+		struct ploop_request *preq, loff_t off, size_t size)
+{
+	struct page *zero_page = ZERO_PAGE(0);
+	int nr_segs = 1, err = -ENOMEM;
+	struct kaio_req *kreq;
+
+	BUG_ON(size > PAGE_SIZE);
+
+	if (size == 0)
+		return 0;
+
+	kreq = kaio_kreq_alloc(preq, &nr_segs);
+	if (!kreq) {
+		PLOOP_REQ_SET_ERROR(preq, -ENOMEM);
+		return err;
+	}
+
+	kreq->bvecs[0].bv_page = zero_page;
+	kreq->bvecs[0].bv_len = size;
+	kreq->bvecs[0].bv_offset = 0;
+	atomic_inc(&preq->io_count);
+
+	err = kaio_kernel_submit(file, kreq, 1, size, off, REQ_WRITE);
+	if (err) {
+		PLOOP_REQ_SET_ERROR(preq, err);
+		ploop_complete_io_request(preq);
+		kfree(kreq);
+		return err;
+	}
+
+	return 0;
+}
+
+static int preprocess_discard_req(struct file *file, struct ploop_request *preq,
+		loff_t *poff, size_t *psize)
+{
+	unsigned int alignment, granularity, zeroes_data;
+	loff_t off = *poff, off_align;
+	size_t size = *psize;
+
+	alignment   = preq->plo->queue->limits.discard_alignment;
+	granularity = preq->plo->queue->limits.discard_granularity;
+	zeroes_data = preq->plo->queue->limits.discard_zeroes_data;
+
+	if (alignment) {
+		off_align = round_up(off, alignment);
+
+		if (zeroes_data &&
+		    kaio_fill_zero_submit(file, preq,
+						off, off_align - off))
+			return -1;
+
+		size -= (off_align - off);
+		off = off_align;
+	}
+
+	if (granularity) {
+		size_t size_align;
+
+		size_align = round_down(size, granularity);
+		if (zeroes_data &&
+		    kaio_fill_zero_submit(file, preq,
+			    off + size_align, size - size_align))
+			return -1;
+
+		size = size_align;
+	}
+
+	*poff = off;
+	*psize = size;
+	return 0;
+}
+
 /*
  * WRITE case:
  *
@@ -284,6 +358,11 @@ static void kaio_sbl_submit(struct file *file, struct ploop_request *preq,
 	ploop_prepare_io_request(preq);
 
 	size <<= 9;
+
+	if ((rw & REQ_DISCARD) &&
+	    preprocess_discard_req(file, preq, &off, &size))
+		goto out;
+
 	while (size > 0) {
 		struct kaio_req *kreq;
 		int nr_segs;
@@ -311,6 +390,7 @@ static void kaio_sbl_submit(struct file *file, struct ploop_request *preq,
 		size -= copy;
 	}
 
+out:
 	kaio_complete_io_request(preq);
 }
 

