[Devel] [PATCH RHEL7 COMMIT] ploop: Export map defines to separate header file

Konstantin Khorenko khorenko at virtuozzo.com
Thu Mar 28 12:37:39 MSK 2019


The commit is pushed to "branch-rh7-3.10.0-957.10.1.vz7.85.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.10.1.vz7.85.5
------>
commit 4a90c34aea751d472c33d62f2912f1e6f2db20df
Author: Kirill Tkhai <ktkhai at virtuozzo.com>
Date:   Thu Mar 28 12:37:37 2019 +0300

    ploop: Export map defines to separate header file
    
    Signed-off-by: Kirill Tkhai <ktkhai at virtuozzo.com>
    
    =====================
    Patchset description:
    
    ploop: Discard with zeroing of ploop1 indexes support
    
    https://jira.sw.ru/browse/PSBM-92367
    https://pmc.acronis.com/browse/VSTOR-19972
    
    Kirill Tkhai (10):
          ploop: Export map defines to separate header file
          ploop: Make submit_alloc() return int value
          ploop: Introduce ploop_submit_alloc() helper
          ploop: Prohibit discard ioctls
          ploop: Prohibit PLOOP_IOC_UPDATE_INDEX on singular list
          ploop: Introduce data_off_in_clusters() helper
          ploop: Add .complete_merge method
          ploop: Zero indexes on discard
          ploop: Fallocate cluster in cached_submit() during hole reuse
          ploop: Populate and maintain holes bitmap
---
 drivers/block/ploop/fmt_ploop1.c |  4 +--
 drivers/block/ploop/map.c        | 52 +------------------------------------
 drivers/block/ploop/map.h        | 56 ++++++++++++++++++++++++++++++++++++++++
 3 files changed, 58 insertions(+), 54 deletions(-)

diff --git a/drivers/block/ploop/fmt_ploop1.c b/drivers/block/ploop/fmt_ploop1.c
index 1281a343bff4..382737b4cb6c 100644
--- a/drivers/block/ploop/fmt_ploop1.c
+++ b/drivers/block/ploop/fmt_ploop1.c
@@ -12,13 +12,11 @@
 
 #include <linux/ploop/ploop.h>
 #include "ploop1_image.h"
+#include "map.h"
 
 /* The implementation of ploop1 (PVD) delta format, defined in ploop1_fmt.h
  */
 
-#define INDEX_PER_PAGE	     (PAGE_SIZE  / 4)
-#define INDEX_PER_PAGE_SHIFT (PAGE_SHIFT - 2)
-
 struct ploop1_private
 {
 	struct page	*dyn_page;
diff --git a/drivers/block/ploop/map.c b/drivers/block/ploop/map.c
index 7b08001aa58b..8a28148c17cb 100644
--- a/drivers/block/ploop/map.c
+++ b/drivers/block/ploop/map.c
@@ -40,12 +40,7 @@
 #include <linux/version.h>
 
 #include <linux/ploop/ploop.h>
-
-/* This defines slot in mapping page. Right now it is 32 bit
- * and therefore it directly matches ploop1 structure. */
-typedef u32 map_index_t;
-
-#define INDEX_PER_PAGE	(PAGE_SIZE / sizeof(map_index_t))
+#include "map.h"
 
 static struct kmem_cache * ploop_map_cache;
 
@@ -53,51 +48,6 @@ static LIST_HEAD(map_lru);
 static DEFINE_SPINLOCK(map_lru_lock);
 static atomic_t map_pages_nr = ATOMIC_INIT(0);
 
-/*
- * Additional information for each page is:
- * 1. rb tree link
- * 2. Page
- * 3. mn_start, mn_end - the first and the last index
- * (correspondingly) the page maps to iblocks.
- * 4. lru linkage
- * 5. delta level of whole page, it is delta, where this page
- *    is backed.
- * 6. Array of delta levels for each map_index in the page.
- *    If page is backed at level N, those levels cannot be >N.
- *    If all the levels == N, array of levels is not allocated.
- *    When at least one level < N, it is stored in the array.
- *    Note, that in this case exporting page to disk implies
- *    clearing irrelevant entries.
- */
-
-struct map_node
-{
-	struct rb_node		rb_link;
-	cluster_t		mn_start;
-	cluster_t		mn_end;
-	unsigned long		state;
-	atomic_t		refcnt;
-	struct ploop_map	*parent;
-
-	struct page		*page;
-	struct list_head	lru;
-	u8			*levels;
-
-	/* List of preq's blocking on this mapping.
-	 *
-	 * We queue here several kinds of requests:
-	 * 1. If mapping is not uptodate, all the requests which need
-	 *    this mapping are queued here. preq state is ENTRY.
-	 * 2. If preq requires index update and it is delayed
-	 *    because writeback is in progress. preq state is INDEX_DELAY,
-	 *    new index is kept in preq->iblock.
-	 * 3. If preq's started index update, preq state is INDEX_WB,
-	 *    new indices are sent to io, but they are not inserted
-	 *    into mapping until writeback is complete.
-	 */
-	struct list_head	io_queue;
-};
-
 cluster_t map_get_mn_end(struct map_node *m)
 {
 	return m->mn_end;
diff --git a/drivers/block/ploop/map.h b/drivers/block/ploop/map.h
new file mode 100644
index 000000000000..580f23ccfbb0
--- /dev/null
+++ b/drivers/block/ploop/map.h
@@ -0,0 +1,56 @@
+#ifndef __PLOOP_MAP_H
+#define __PLOOP_MAP_H
+
+/* This defines slot in mapping page. Right now it is 32 bit
+ * and therefore it directly matches ploop1 structure. */
+typedef u32 map_index_t;
+
+#define INDEX_PER_PAGE	(PAGE_SIZE / sizeof(map_index_t))
+#define INDEX_PER_PAGE_SHIFT (PAGE_SHIFT - 2)
+
+/*
+ * Additional information for each page is:
+ * 1. rb tree link
+ * 2. Page
+ * 3. mn_start, mn_end - the first and the last index
+ * (correspondingly) the page maps to iblocks.
+ * 4. lru linkage
+ * 5. delta level of whole page, it is delta, where this page
+ *    is backed.
+ * 6. Array of delta levels for each map_index in the page.
+ *    If page is backed at level N, those levels cannot be >N.
+ *    If all the levels == N, array of levels is not allocated.
+ *    When at least one level < N, it is stored in the array.
+ *    Note, that in this case exporting page to disk implies
+ *    clearing irrelevant entries.
+ */
+
+struct map_node
+{
+	struct rb_node		rb_link;
+	cluster_t		mn_start;
+	cluster_t		mn_end;
+	unsigned long		state;
+	atomic_t		refcnt;
+	struct ploop_map	*parent;
+
+	struct page		*page;
+	struct list_head	lru;
+	u8			*levels;
+
+	/* List of preq's blocking on this mapping.
+	 *
+	 * We queue here several kinds of requests:
+	 * 1. If mapping is not uptodate, all the requests which need
+	 *    this mapping are queued here. preq state is ENTRY.
+	 * 2. If preq requires index update and it is delayed
+	 *    because writeback is in progress. preq state is INDEX_DELAY,
+	 *    new index is kept in preq->iblock.
+	 * 3. If preq's started index update, preq state is INDEX_WB,
+	 *    new indices are sent to io, but they are not inserted
+	 *    into mapping until writeback is complete.
+	 */
+	struct list_head	io_queue;
+};
+
+#endif /* __PLOOP_MAP_H */
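
Editor's note, not part of the patch: with map_index_t, INDEX_PER_PAGE and
INDEX_PER_PAGE_SHIFT now shared through map.h, code outside map.c can compute
which mapping page covers a given cluster and which slot inside that page
holds its index. A minimal sketch under the assumption that cluster_t comes
from <linux/ploop/ploop.h> and that no per-format header offset applies to
the first index page; the helper names are hypothetical:

#include <linux/ploop/ploop.h>	/* cluster_t */
#include "map.h"		/* map_index_t, INDEX_PER_PAGE, INDEX_PER_PAGE_SHIFT */

/* Mapping page that holds the index entry for cluster 'clu'. */
static inline cluster_t cluster_to_map_page(cluster_t clu)
{
	return clu >> INDEX_PER_PAGE_SHIFT;
}

/* Slot of cluster 'clu' inside that mapping page. */
static inline unsigned int cluster_to_map_slot(cluster_t clu)
{
	return clu & (INDEX_PER_PAGE - 1);
}

Since map_index_t is 32 bit, INDEX_PER_PAGE equals 1 << INDEX_PER_PAGE_SHIFT
(PAGE_SIZE / 4 == 1 << (PAGE_SHIFT - 2)), so the shift and the mask above stay
consistent with each other by construction.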


