[Devel] [PATCH rh7] ploop: Add interface to dump cached BAT

Kirill Tkhai ktkhai at virtuozzo.com
Mon Mar 23 11:31:00 MSK 2020


This adds an interface for dumping the BAT cached in the kernel to userspace.
The new ioctl(PLOOP_IOC_DUMP_CACHED_BAT) takes a struct ploop_dump_bat_ctl
argument. The caller sets the desired delta level, the start cluster and
the number of clusters to dump. The ioctl fills an array with the clusters'
mapping: an array element contains the cached index value (which may be
zero or non-zero) if the related cluster is cached for that delta;
otherwise it contains PLOOP_DUMP_BAT_UNCACHED_INDEX (0xFFFFFFFFU).

Example: dump the cached clusters from 0 to 10485759 for the delta at level 1:

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ploop/ploop_if.h>   /* struct ploop_dump_bat_ctl, PLOOP_IOC_DUMP_CACHED_BAT */

int main(void)
{
        int ret, fd = open("/dev/ploop12345", O_RDONLY);
        struct ploop_dump_bat_ctl *ctl;
        unsigned int i, nr_clusters = 10485760;

        if (fd < 0) {
                perror("open");
                exit(1);
        }

        ctl = malloc(sizeof(*ctl) + nr_clusters * sizeof(ctl->bat[0]));
        if (!ctl) {
                perror("malloc");
                exit(1);
        }

        ctl->level = 1; /* Delta level */
        ctl->start_cluster = 0;
        ctl->nr_clusters = nr_clusters;

        ret = ioctl(fd, PLOOP_IOC_DUMP_CACHED_BAT, ctl);
        if (ret) {
                perror("ioctl");
                exit(1);
        }

        for (i = 0; i < nr_clusters; i++)
                if (ctl->bat[i] != PLOOP_DUMP_BAT_UNCACHED_INDEX)
                        printf("%u->%u\n", i, ctl->bat[i]);

        free(ctl);
        close(fd);
        return 0;
}
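
In a real tool, nr_clusters would likely be derived from the device size
instead of being hardcoded. A minimal sketch of such a helper is below; it
is not part of this patch, and it assumes the cluster size is already known
from the ploop configuration (this ioctl does not report it):

#include <linux/fs.h>   /* BLKGETSIZE64 */

/* Hypothetical helper: number of clusters covering the whole device.
 * cluster_size_bytes must come from the ploop configuration (1M is the
 * usual default); it is not provided by this interface.
 */
static unsigned int count_clusters(int fd, unsigned long long cluster_size_bytes)
{
        unsigned long long dev_bytes;

        if (ioctl(fd, BLKGETSIZE64, &dev_bytes)) {
                perror("BLKGETSIZE64");
                exit(1);
        }
        return (dev_bytes + cluster_size_bytes - 1) / cluster_size_bytes;
}

With it, the hardcoded 10485760 above could become
nr_clusters = count_clusters(fd, 1024 * 1024).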

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 drivers/block/ploop/dev.c        |   35 ++++++++++++++++++++++
 drivers/block/ploop/fmt_ploop1.c |    1 +
 drivers/block/ploop/map.c        |   61 ++++++++++++++++++++++++++++++++++++++
 include/linux/ploop/ploop.h      |    4 ++
 include/linux/ploop/ploop_if.h   |   12 +++++++
 5 files changed, 113 insertions(+)

diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 69430472d93d..da124fa50250 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -5345,6 +5345,38 @@ static int ploop_thaw(struct ploop_device *plo)
 	return err;
 }
 
+static int ploop_dump_cached_bat(struct ploop_device *plo, unsigned long arg)
+{
+	struct ploop_dump_bat_ctl ctl, __user *uctl;
+	struct ploop_delta *delta;
+	u32 end_cluster;
+
+	uctl = (struct ploop_dump_bat_ctl __user *)arg;
+
+	if (!test_bit(PLOOP_S_RUNNING, &plo->state))
+		return -ENODEV;
+
+	if (plo->maintenance_type != PLOOP_MNTN_OFF)
+		return -EBUSY;
+
+	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
+		return -EFAULT;
+
+	delta = find_delta(plo, ctl.level);
+	if (!delta)
+		return -ENOENT;
+
+	end_cluster = ctl.start_cluster + ctl.nr_clusters - 1;
+	if (end_cluster <= ctl.start_cluster)
+		return -EINVAL;
+
+	if (!delta->ops->dump_bat)
+		return -ENOTSUPP;
+
+	return delta->ops->dump_bat(delta, ctl.start_cluster,
+				    end_cluster, &uctl->bat[0]);
+}
+
 static int ploop_ioctl(struct block_device *bdev, fmode_t fmode, unsigned int cmd,
 		       unsigned long arg)
 {
@@ -5464,6 +5496,9 @@ static int ploop_ioctl(struct block_device *bdev, fmode_t fmode, unsigned int cm
 	case PLOOP_IOC_THAW:
 		err = ploop_thaw(plo);
 		break;
+	case PLOOP_IOC_DUMP_CACHED_BAT:
+		err = ploop_dump_cached_bat(plo, arg);
+		break;
 	default:
 		err = -EINVAL;
 	}
diff --git a/drivers/block/ploop/fmt_ploop1.c b/drivers/block/ploop/fmt_ploop1.c
index 40e24a31689b..326862a2fd07 100644
--- a/drivers/block/ploop/fmt_ploop1.c
+++ b/drivers/block/ploop/fmt_ploop1.c
@@ -920,6 +920,7 @@ static struct ploop_delta_ops ploop1_delta_ops =
 	.prepare_grow	=	ploop1_prepare_grow,
 	.complete_grow	=	ploop1_complete_grow,
 	.add_free_blk	=	ploop1_add_free_blk,
+	.dump_bat	=	ploop_map_dump_bat,
 };
 
 static int __init pfmt_ploop1_mod_init(void)
diff --git a/drivers/block/ploop/map.c b/drivers/block/ploop/map.c
index f6a38e40d8c4..6276fb9ec414 100644
--- a/drivers/block/ploop/map.c
+++ b/drivers/block/ploop/map.c
@@ -1225,6 +1225,67 @@ ploop_index_wb_complete(struct ploop_request * preq)
 	map_wb_complete(m, preq->error);
 }
 
+int ploop_map_dump_bat(struct ploop_delta *delta, u32 start_cluster,
+			u32 end_cluster, u32 __user *to_addr)
+{
+	struct ploop_device *plo = delta->plo;
+	struct ploop_map *map = &plo->map;
+	unsigned int i, idx, level, count;
+	struct map_node *m;
+	struct page *page;
+	u32 *bat;
+	int ret;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+	bat = kmap(page);
+
+	while (start_cluster <= end_cluster) {
+		idx = (start_cluster + PLOOP_MAP_OFFSET) & (INDEX_PER_PAGE - 1);
+		count = INDEX_PER_PAGE - idx;
+		if (count > end_cluster + 1 - start_cluster)
+			count = end_cluster + 1 - start_cluster;
+
+		spin_lock_irq(&plo->lock);
+		m = map_lookup(map, start_cluster);
+		if (!m || !test_bit(PLOOP_MAP_UPTODATE, &m->state)) {
+			BUILD_BUG_ON(PLOOP_DUMP_BAT_UNCACHED_INDEX != 0xFFFFFFFFU);
+			memset(&bat[idx], 0xff, count * sizeof(u32));
+			goto unlock;
+		}
+
+		for (i = idx; i < idx + count; i++) {
+			if (m->levels)
+				level = m->levels[i];
+			else
+				level = MAP_LEVEL(m);
+
+			if (level != delta->level)
+				bat[i] = PLOOP_DUMP_BAT_UNCACHED_INDEX;
+			else
+				bat[i] = ((map_index_t *)page_address(m->page))[i];
+		}
+unlock:
+		spin_unlock_irq(&map->plo->lock);
+
+		if (copy_to_user(to_addr, &bat[idx], count * sizeof(u32))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		start_cluster += count;
+		to_addr += count;
+	}
+
+	ret = 0;
+out:
+	kunmap(page);
+	put_page(page);
+	return ret;
+}
+EXPORT_SYMBOL(ploop_map_dump_bat);
+
 void ploop_map_start(struct ploop_map * map, u64 bd_size)
 {
 	struct ploop_device * plo = map->plo;
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index a22696e9c42e..4b58d530435e 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -285,6 +285,8 @@ struct ploop_delta_ops
 	int		(*prepare_grow)(struct ploop_delta *, u64 *new_size, int *reloc);
 	int		(*complete_grow)(struct ploop_delta *, u64 new_size);
 	void		(*add_free_blk)(struct ploop_delta *, struct ploop_request *);
+	int		(*dump_bat)(struct ploop_delta *delta, u32 start_cluster,
+				    u32 end_cluster, u32 __user *to_addr);
 };
 
 /* Virtual image. */
@@ -893,6 +895,8 @@ void ploop_index_update(struct ploop_request * preq);
 void ploop_index_wb_complete(struct ploop_request * preq);
 int __init ploop_map_init(void);
 void ploop_map_exit(void);
+int ploop_map_dump_bat(struct ploop_delta *delta, u32 start_cluster,
+			u32 end_cluster, u32 __user *to_addr);
 void ploop_add_req_to_fsync_queue(struct ploop_request * preq);
 int ploop_submit_alloc(struct ploop_delta *delta, struct ploop_request *preq,
 		       struct bio_list *sbl, unsigned int size, iblock_t iblk);
diff --git a/include/linux/ploop/ploop_if.h b/include/linux/ploop/ploop_if.h
index 852e04d50eb9..213fc3adb289 100644
--- a/include/linux/ploop/ploop_if.h
+++ b/include/linux/ploop/ploop_if.h
@@ -227,6 +227,15 @@ struct ploop_push_backup_stop_ctl
 	__u32	status; /* for sanity: non-zero if pending or active queue is not empty */
 } __attribute__ ((aligned (8)));
 
+struct ploop_dump_bat_ctl
+{
+	__u32	level;
+	__u32	start_cluster;
+	__u32	nr_clusters;
+#define PLOOP_DUMP_BAT_UNCACHED_INDEX	0xFFFFFFFFU
+	__u32	bat[0];
+} __attribute__ ((aligned (8)));
+
 /* maintenance types */
 enum {
 	PLOOP_MNTN_OFF = 0,  /* no maintenance is in progress */
@@ -367,6 +376,9 @@ struct ploop_track_extent
 /* Unfreeze FS mounted over ploop */
 #define PLOOP_IOC_THAW		_IO(PLOOPCTLTYPE, 33)
 
+/* Get cached BAT */
+#define PLOOP_IOC_DUMP_CACHED_BAT _IOW(PLOOPCTLTYPE, 34, struct ploop_dump_bat_ctl)
+
 /* Events exposed via /sys/block/ploopN/pstate/event */
 #define PLOOP_EVENT_ABORTED	1
 #define PLOOP_EVENT_STOPPED	2



