[Devel] [PATCH RHEL9 COMMIT] fs/fuse: pcs: corrupted writes in encrypted journalless mode
Konstantin Khorenko
khorenko at virtuozzo.com
Fri Apr 26 19:00:43 MSK 2024
The commit is pushed to "branch-rh9-5.14.0-362.8.1.vz9.35.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh9-5.14.0-362.8.1.vz9.35.17
------>
commit 678b95f5ab2c292376f3079425d47711375ef541
Author: Alexey Kuznetsov <kuznet at virtuozzo.com>
Date: Fri Apr 26 23:19:02 2024 +0800
fs/fuse: pcs: corrupted writes in encrypted journalless mode
It is a shame. The bug is stupid and the effect is disastrous.
khorenko@: Previously, if we had a big write request that had to be split
into 64KB blocks, init_crypted_data() took the FIRST 64KB of the request
for every split block and encrypted/wrote it.
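For illustration, below is a minimal userspace sketch of the offset-walking
logic the fix introduces. The struct seg type, select_chunk() helper and the
numbers in main() are simplified stand-ins invented for this example, not the
kernel's bio_vec machinery; it only mirrors the skip/trim loops added to
init_crypted_data().

#include <stdio.h>

/* Simplified stand-in for struct bio_vec: offset/length of one data
 * segment (the page pointer is omitted for brevity). */
struct seg {
	unsigned int off;
	unsigned int len;
};

/* Pick the part of the request's segment list covering the sub-chunk
 * [start_off, start_off + size). The buggy code ignored start_off and
 * always copied src[0..nvec), i.e. the FIRST bytes of the request. */
static int select_chunk(const struct seg *src, int nvec,
			unsigned int start_off, unsigned int size,
			struct seg *dst)
{
	unsigned int off = 0, end_off = start_off + size;
	int first, n;

	/* Skip whole segments that end at or before the chunk start. */
	for (first = 0; first < nvec; first++) {
		unsigned int next = off + src[first].len;

		if (next > start_off)
			break;
		off = next;
	}

	/* Copy the covering segments, trimming the partially covered
	 * first and last ones. */
	for (n = 0; n < nvec - first && off < end_off; n++) {
		const struct seg *v = &src[first + n];
		unsigned int next = off + v->len;

		dst[n] = *v;
		if (off < start_off) {
			dst[n].off += start_off - off;
			dst[n].len -= start_off - off;
		}
		if (next > end_off)
			dst[n].len -= next - end_off;
		off = next;
	}
	return n;	/* number of segments actually used */
}

int main(void)
{
	/* A 256KB request split into 64KB segments; ask for the third
	 * 64KB chunk (dio_offset = 131072). The old code would have
	 * returned segment 0, i.e. the first 64KB, here. */
	struct seg req[4] = {
		{ 0, 65536 }, { 0, 65536 }, { 0, 65536 }, { 0, 65536 }
	};
	struct seg out[4];
	int i, n = select_chunk(req, 4, 131072, 65536, out);

	for (i = 0; i < n; i++)
		printf("seg %d: off=%u len=%u\n", i, out[i].off, out[i].len);
	return 0;
}

The same shift is why the encryption loop in the last hunk advances `first`
alongside `n` and reads the source pages from r->exec.io.bvec[first] rather
than r->exec.io.bvec[n].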
https://pmc.acronis.work/browse/VSTOR-84977
Fixes: 61973b1b2da3 ("fuse: cs acceleration for writes")
Signed-off-by: Alexey Kuznetsov <kuznet at acronis.com>
Feature: vStorage
---
fs/fuse/kio/pcs/pcs_cs_accel.c | 43 ++++++++++++++++++++++++++++++++++--------
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/fs/fuse/kio/pcs/pcs_cs_accel.c b/fs/fuse/kio/pcs/pcs_cs_accel.c
index a18069f22d85..9be94a610aaa 100644
--- a/fs/fuse/kio/pcs/pcs_cs_accel.c
+++ b/fs/fuse/kio/pcs/pcs_cs_accel.c
@@ -932,7 +932,8 @@ static int init_crypted_data(struct pcs_int_request * ireq, int idx)
struct pcs_int_request *parent = ireq->completion_data.parent;
struct pcs_fuse_req * r;
struct bio_vec * bvec;
- int n, nvec;
+ int n, nvec, first;
+ unsigned int off, start_off, end_off;
u64 pos;
u64 chunk_id;
struct pcs_csa_context * csa_ctx;
@@ -951,14 +952,38 @@ static int init_crypted_data(struct pcs_int_request * ireq, int idx)
if (!bvec)
return -ENOMEM;
- for (n = 0; n < nvec; n++) {
- bvec[n] = r->exec.io.bvec[n];
- if ((bvec[n].bv_offset|bvec[n].bv_len)&511)
+ off = 0;
+ start_off = ireq->iochunk.dio_offset;
+ end_off = start_off + ireq->iochunk.size;
+
+ for (first = 0; first < nvec; first++) {
+ struct bio_vec *v = r->exec.io.bvec + first;
+ unsigned int next = off + v->bv_len;
+
+ if (next > start_off)
+ break;
+ off = next;
+ }
+
+ for (n = 0; n < nvec - first && off < end_off; n++) {
+ struct bio_vec *v = r->exec.io.bvec + first + n;
+ unsigned int next = off + v->bv_len;
+
+ bvec[n] = *v;
+ if (off < start_off) {
+ bvec[n].bv_offset += start_off - off;
+ bvec[n].bv_len -= start_off - off;
+ }
+ if (next > end_off)
+ bvec[n].bv_len -= next - end_off;
+ if ((bvec[n].bv_offset | bvec[n].bv_len) & 511)
goto out;
bvec[n].bv_page = alloc_page(GFP_NOIO);
if (!bvec[n].bv_page)
goto out;
+ off = next;
}
+ nvec = n;
rcu_read_lock();
csa_ctx = rcu_dereference(ireq->iochunk.csl->cs[idx].cslink.cs->csa_ctx);
@@ -969,17 +994,19 @@ static int init_crypted_data(struct pcs_int_request * ireq, int idx)
pos = ireq->iochunk.offset;
chunk_id = ireq->iochunk.map->id;
- for (n = 0; n < nvec; n++) {
+ for (n = 0; n < nvec; n++, first++) {
if (tfm->base.base.__crt_alg->cra_priority == 400)
- encrypt_page_ctr(tfm, bvec[n].bv_page, r->exec.io.bvec[n].bv_page, bvec[n].bv_offset, bvec[n].bv_len, pos, chunk_id);
+ encrypt_page_ctr(tfm, bvec[n].bv_page, r->exec.io.bvec[first].bv_page,
+ bvec[n].bv_offset, bvec[n].bv_len, pos, chunk_id);
else
- encrypt_page_xts(tfm, bvec[n].bv_page, r->exec.io.bvec[n].bv_page, bvec[n].bv_offset, bvec[n].bv_len, pos, chunk_id);
+ encrypt_page_xts(tfm, bvec[n].bv_page, r->exec.io.bvec[first].bv_page,
+ bvec[n].bv_offset, bvec[n].bv_len, pos, chunk_id);
pos += bvec[n].bv_len;
}
rcu_read_unlock();
ireq->iochunk.acr.awr[idx].bvec_copy = bvec;
- ireq->iochunk.acr.awr[idx].num_copy_bvecs = n;
+ ireq->iochunk.acr.awr[idx].num_copy_bvecs = nvec;
return 0;
out: