[Devel] [PATCH rh7 2/2] ploop: push_backup: improve ioctl(PLOOP_IOC_PUSH_BACKUP_IO)
Maxim Patlasov
mpatlasov at virtuozzo.com
Tue May 17 17:05:17 PDT 2016
The patch substitutes pbd->ppb_offset with pbd->reported_map, the bitmask
of cluster-blocks that were already backed up according to information
the kernel ploop has received from backup tool via these ioctls.
Using a full-fledged bitmask instead of the scalar ppb_offset lifts the
following limitation: from now on, the backup tool can report non-adjacent
extents scattered over the whole [0, bdev_size) range. It is also OK to
report an extent more than once.
https://jira.sw.ru/browse/PSBM-45000
Signed-off-by: Maxim Patlasov <mpatlasov at virtuozzo.com>
---
drivers/block/ploop/push_backup.c | 53 ++++++++++++++++++++++++-------------
1 file changed, 35 insertions(+), 18 deletions(-)
diff --git a/drivers/block/ploop/push_backup.c b/drivers/block/ploop/push_backup.c
index c4f87e9..5636509 100644
--- a/drivers/block/ploop/push_backup.c
+++ b/drivers/block/ploop/push_backup.c
@@ -26,7 +26,6 @@ struct ploop_pushbackup_desc {
struct page **ppb_map; /* Ploop Push Backup mask */
struct page **reported_map; /* what userspace reported as backed up */
cluster_t ppb_block_max; /* first invalid index in ppb_map */
- cluster_t ppb_offset; /* [0, ppb_offset) is ACKed by userspace */
spinlock_t ppb_lock;
struct completion ppb_comp;
@@ -194,6 +193,38 @@ static bool check_bit_in_map(struct page **map, u64 map_max, u64 blk)
return do_bit_in_map(map, map_max, blk, CHECK_BIT);
}
+/* Mark clusters [blk, blk + cnt) as reported in the per-page bitmask. */
+static void set_bits_in_map(struct page **map, u64 map_max, u64 blk, u64 cnt)
+{
+ if (blk + cnt > map_max) {
+ printk("set_bits_in_map: extent [%llu, %llu) is out of range"
+ " [0, %llu)\n", blk, blk + cnt, map_max);
+ return;
+ }
+ while (cnt) {
+ unsigned long idx = blk >> (PAGE_SHIFT + 3);
+ unsigned long off = blk & (BITS_PER_PAGE - 1);
+ unsigned long len;
+ void *addr = page_address(map[idx]);
+
+ /* Bits to set in this page before crossing into the next one. */
+ len = min_t(unsigned long, BITS_PER_PAGE - off, cnt);
+ cnt -= len;
+ blk += len;
+
+ while (len) {
+ if ((off & 31) == 0 && len >= 32) {
+ /* Word-aligned run: fill 32 bits at once. */
+ *(u32 *)(addr + (off >> 3)) = -1;
+ off += 32;
+ len -= 32;
+ } else {
+ __set_bit(off, addr);
+ off += 1;
+ len -= 1;
+ }
+ }
+ }
+}
+
+
/* intentionally lockless */
void ploop_pb_clear_bit(struct ploop_pushbackup_desc *pbd, cluster_t clu)
{
@@ -450,8 +481,8 @@ int ploop_pb_preq_add_pending(struct ploop_pushbackup_desc *pbd,
return -EINTR;
}
- /* if (preq matches pbd->reported_map) return -EALREADY; */
- if (preq->req_cluster < pbd->ppb_offset) {
+ if (check_bit_in_map(pbd->reported_map, pbd->ppb_block_max,
+ preq->req_cluster)) {
spin_unlock(&pbd->ppb_lock);
return -EALREADY;
}
@@ -632,21 +663,7 @@ void ploop_pb_put_reported(struct ploop_pushbackup_desc *pbd,
* -- see "push_backup special processing" in ploop_entry_request()
* for details.
*/
-
- /*
- * "If .. else if .." below will be fully reworked when switching
- * from pbd->ppb_offset to pbd->reported_map. All we need here is
- * actaully simply to set bits corresponding to [clu, clu+len) in
- * pbd->reported_map.
- */
- if (pbd->ppb_offset >= clu) { /* lucky strike */
- if (clu + len > pbd->ppb_offset) {
- pbd->ppb_offset = clu + len;
- }
- } else if (n_found != len) { /* a hole, bad luck */
- printk("ploop: push_backup ERR: off=%u ext=[%u, %u) found %d\n",
- pbd->ppb_offset, clu, clu + len, n_found);
- }
+ set_bits_in_map(pbd->reported_map, pbd->ppb_block_max, clu, len);
spin_unlock(&pbd->ppb_lock);
More information about the Devel
mailing list