[Devel] [PATCH RHEL7 COMMIT] cbt: new api: blk_cbt_map_merge()
Konstantin Khorenko
khorenko at virtuozzo.com
Fri May 27 02:15:21 PDT 2016
The commit is pushed to "branch-rh7-3.10.0-327.18.2.vz7.14.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.18.2.vz7.14.8
------>
commit 9593dfbcdf328c1c9fe5e1f5cb8c11ccef7ddf08
Author: Maxim Patlasov <mpatlasov at virtuozzo.com>
Date: Fri May 27 13:15:20 2016 +0400
cbt: new api: blk_cbt_map_merge()
Patchset description:
ploop: push_backup: preserve CBT mask
The first patch of the set implements a helper to merge a copy of
CBT mask back to CBT: blk_cbt_map_merge().
The second patch implements the following logic: keep a copy of
CBT mask until either userspace reports success/failure of
push_backup or ploop is released ultimately.
If the user reports success, free the copy. In all other cases,
the copy is merged back to CBT. This will allow re-doing
push_backup if the backup tool crashes in the middle.
https://jira.sw.ru/browse/PSBM-47429
Maxim Patlasov (2):
cbt: new api: blk_cbt_map_merge()
ploop: push_backup: merge pbd->cbt_map back to CBT
========================================
This patch description:
The new api blk_cbt_map_merge() allows merging a copy -- acquired earlier by
blk_cbt_map_copy_once() -- back into the main CBT mask. This is useful to handle
userspace backup tool crashes.
https://jira.sw.ru/browse/PSBM-47429
Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
---
block/blk-cbt.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/blkdev.h | 3 +++
2 files changed, 70 insertions(+)
diff --git a/block/blk-cbt.c b/block/blk-cbt.c
index 8ba52fb..d635186 100644
--- a/block/blk-cbt.c
+++ b/block/blk-cbt.c
@@ -348,6 +348,73 @@ fail:
}
EXPORT_SYMBOL(blk_cbt_map_copy_once);
+/*
+ * blk_cbt_page_merge - OR every bit of @pg_from into @pg_to.
+ *
+ * Both pages hold CBT dirty bitmaps; walking them as u32 words merges
+ * the "changed block" bits of the add-on page into the main page.
+ * Bits are only ever set, never cleared. The caller holds the page
+ * lock on @pg_to (see blk_cbt_map_merge()).
+ */
+static void blk_cbt_page_merge(struct page *pg_from, struct page *pg_to)
+{
+ u32 *from = page_address(pg_from);
+ u32 *to = page_address(pg_to);
+ u32 *fin = to + PAGE_SIZE/sizeof(*to);
+
+ while (to < fin) {
+ *to |= *from; /* set union of dirty bits */
+ to++;
+ from++;
+ }
+}
+
+/*
+ * blk_cbt_map_merge - merge a previously copied CBT map back into the
+ * queue's live CBT mask.
+ *
+ * @q:          request queue owning the CBT
+ * @uuid:       must match the live cbt->uuid
+ * @map:        page array acquired earlier (by blk_cbt_map_copy_once(),
+ *              per the patch description)
+ * @block_max:  must match the live cbt->block_max
+ * @block_bits: must match the live cbt->block_bits
+ *
+ * Returns 0 on success, -ENOENT if the queue has no CBT, -EINVAL on a
+ * uuid/geometry mismatch or NULL @map/@uuid, or the error from
+ * cbt_page_alloc().
+ *
+ * NOTE(review): on an allocation failure mid-loop, pages merged so far
+ * stay merged -- the operation is not rolled back. Confirm callers
+ * tolerate a partial merge (harmless for CBT since extra dirty bits
+ * only cause over-backup, presumably -- verify).
+ */
+int blk_cbt_map_merge(struct request_queue *q, __u8 *uuid,
+ struct page **map, blkcnt_t block_max,
+ blkcnt_t block_bits)
+{
+ struct cbt_info *cbt;
+ unsigned long i;
+
+ mutex_lock(&cbt_mutex);
+ cbt = q->cbt;
+
+ if (!cbt) {
+ mutex_unlock(&cbt_mutex);
+ return -ENOENT;
+ }
+
+ BUG_ON(!cbt->map);
+ BUG_ON(!cbt->block_max);
+
+ /* Refuse to merge a copy taken from a different or resized CBT. */
+ if (!map || !uuid || memcmp(uuid, cbt->uuid, sizeof(cbt->uuid)) ||
+ block_max != cbt->block_max || block_bits != cbt->block_bits) {
+ mutex_unlock(&cbt_mutex);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < NR_PAGES(cbt->block_max); i++) {
+ struct page *page_main = cbt->map[i];
+ struct page *page_addon = map[i];
+
+ BUG_ON(page_main == CBT_PAGE_MISSED);
+ BUG_ON(page_addon == CBT_PAGE_MISSED);
+
+ /* No dirty bits recorded in the copy for this range: skip. */
+ if (!page_addon)
+ continue;
+
+ /*
+  * The live mask has no page here yet: allocate one. The third
+  * argument (0) presumably means "not in RCU context" -- the
+  * cbt_page_alloc() definition is outside this hunk; confirm.
+  */
+ if (!page_main) {
+ int ret = cbt_page_alloc(&cbt, i, 0);
+ if (ret) {
+ mutex_unlock(&cbt_mutex);
+ return ret;
+ }
+ /* cbt_page_alloc() takes &cbt, so re-read the slot. */
+ page_main = cbt->map[i];
+ BUG_ON(page_main == NULL);
+ BUG_ON(page_main == CBT_PAGE_MISSED);
+ }
+
+ /*
+  * Page lock serializes the merge against concurrent writers of
+  * this bitmap page (assumption based on the locking here --
+  * confirm against the I/O-path bit-setting code).
+  */
+ spin_lock_page(page_main);
+ blk_cbt_page_merge(page_addon, page_main);
+ unlock_page(page_main);
+ }
+ mutex_unlock(&cbt_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(blk_cbt_map_merge);
+
void blk_cbt_update_size(struct block_device *bdev)
{
struct request_queue *q;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56c3a08..c16f4da 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1654,6 +1654,9 @@ extern int blk_cbt_ioctl(struct block_device *bdev, unsigned cmd, char __user *a
extern int blk_cbt_map_copy_once(struct request_queue *q, __u8 *uuid,
struct page ***map_ptr, blkcnt_t *block_max,
blkcnt_t *block_bits);
+extern int blk_cbt_map_merge(struct request_queue *q, __u8 *uuid,
+ struct page **map, blkcnt_t block_max,
+ blkcnt_t block_bits);
#else /* CONFIG_BLK_DEV_CBT */
static inline void blk_cbt_update_size(struct block_device *bdev)
{
More information about the Devel
mailing list