[Devel] [PATCH RHEL9 COMMIT] dm-ploop: Prefix all ploop functions with ploop_

Konstantin Khorenko khorenko at virtuozzo.com
Mon Aug 15 21:17:26 MSK 2022


The commit is pushed to "branch-rh9-5.14.0-70.13.1.vz9.16.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh9-5.14.0-70.13.1.vz9.16.8
------>
commit 58de9643d4cf4dd320dfd60d32a06763020e260b
Author: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
Date:   Mon Aug 15 12:20:54 2022 +0300

    dm-ploop: Prefix all ploop functions with ploop_
    
    This makes oopses easier to read and avoids name collisions when
    instrumenting functions for fault injection, where only the function
    name is used as an identifier.
    
    Make workqueue function naming consistent: do_ploop_*_work.
    
    No functional changes
    
    https://jira.sw.ru/browse/PSBM-141449
    
    Signed-off-by: Alexander Atanasov <alexander.atanasov at virtuozzo.com>
    
    Feature: dm-ploop: ploop target driver fault injection
---
 drivers/md/dm-ploop-bat.c    |  56 ++---
 drivers/md/dm-ploop-cmd.c    | 120 +++++-----
 drivers/md/dm-ploop-map.c    | 547 ++++++++++++++++++++++---------------------
 drivers/md/dm-ploop-target.c |  24 +-
 drivers/md/dm-ploop.h        | 105 +++++----
 5 files changed, 444 insertions(+), 408 deletions(-)

diff --git a/drivers/md/dm-ploop-bat.c b/drivers/md/dm-ploop-bat.c
index b7b1dd2f2a0c..99f046dfec3b 100644
--- a/drivers/md/dm-ploop-bat.c
+++ b/drivers/md/dm-ploop-bat.c
@@ -11,7 +11,7 @@
 #include <linux/mm.h>
 #include "dm-ploop.h"
 
-struct md_page * md_page_find(struct ploop *ploop, u32 id)
+struct md_page *ploop_md_page_find(struct ploop *ploop, u32 id)
 {
 	struct rb_node *node;
 	struct md_page *md;
@@ -55,12 +55,12 @@ static void __md_page_insert(struct rb_root *root, struct md_page *new_md)
 	rb_insert_color(&new_md->node, root);
 }
 
-void md_page_insert(struct ploop *ploop, struct md_page *new_md)
+void ploop_md_page_insert(struct ploop *ploop, struct md_page *new_md)
 {
 	__md_page_insert(&ploop->bat_entries, new_md);
 }
 
-static struct md_page * alloc_md_page(u32 id)
+static struct md_page *ploop_alloc_md_page(u32 id)
 {
 	struct md_page *md;
 	struct page *page;
@@ -101,20 +101,20 @@ void ploop_free_md_page(struct md_page *md)
 	kfree(md);
 }
 
-int prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
+int ploop_prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
 		      u32 new_nr_bat_entries)
 {
 	u32 i, nr_pages, new_nr_pages;
 	struct md_page *md;
 	void *addr;
 
-	new_nr_pages = bat_clu_to_page_nr(new_nr_bat_entries - 1) + 1;
+	new_nr_pages = ploop_bat_clu_to_page_nr(new_nr_bat_entries - 1) + 1;
 	nr_pages = 0;
 	if (nr_bat_entries)
-		nr_pages = bat_clu_to_page_nr(nr_bat_entries - 1) + 1;
+		nr_pages = ploop_bat_clu_to_page_nr(nr_bat_entries - 1) + 1;
 
 	for (i = nr_pages; i < new_nr_pages; i++) {
-		md = alloc_md_page(i);
+		md = ploop_alloc_md_page(i);
 		if (!md)
 			return -ENOMEM;
 		addr = kmap_atomic(md->page);
@@ -127,17 +127,17 @@ int prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
 	return 0;
 }
 
-bool try_update_bat_entry(struct ploop *ploop, u32 clu, u8 level, u32 dst_clu)
+bool ploop_try_update_bat_entry(struct ploop *ploop, u32 clu, u8 level, u32 dst_clu)
 {
-	u32 *bat_entries, id = bat_clu_to_page_nr(clu);
-	struct md_page *md = md_page_find(ploop, id);
+	u32 *bat_entries, id = ploop_bat_clu_to_page_nr(clu);
+	struct md_page *md = ploop_md_page_find(ploop, id);
 
 	lockdep_assert_held(&ploop->bat_rwlock);
 
 	if (!md)
 		return false;
 
-	clu = bat_clu_idx_in_page(clu); /* relative offset */
+	clu = ploop_bat_clu_idx_in_page(clu); /* relative offset */
 
 	if (md->bat_levels[clu] == level) {
 		bat_entries = kmap_atomic(md->page);
@@ -234,7 +234,7 @@ static int ploop_delta_check_header(struct ploop *ploop,
 				    u32 *delta_nr_be_ret)
 {
 	u32 bytes, delta_nr_be, offset_clusters, bat_clusters;
-	struct md_page *md0 = md_first_entry(md_root);
+	struct md_page *md0 = ploop_md_first_entry(md_root);
 	struct ploop_pvd_header *d_hdr;
 	int ret = -EPROTO;
 
@@ -262,7 +262,7 @@ static int ploop_delta_check_header(struct ploop *ploop,
 	return ret;
 }
 
-static int convert_bat_entries(struct ploop *ploop, struct rb_root *md_root,
+static int ploop_convert_bat_entries(struct ploop *ploop, struct rb_root *md_root,
 			       u32 nr_be, u32 nr_pages, loff_t file_size)
 {
 	u32 i, end, bytes, bat_clusters, page_id, *bat_entries, max_file_clu;
@@ -311,9 +311,9 @@ int ploop_read_delta_metadata(struct ploop *ploop, struct file *file,
 	int ret;
 
 	ret = -ENOMEM;
-	if (prealloc_md_pages(md_root, 0, 1))
+	if (ploop_prealloc_md_pages(md_root, 0, 1))
 		goto out;
-	bvec[0].bv_page = md_first_entry(md_root)->page;
+	bvec[0].bv_page = ploop_md_first_entry(md_root)->page;
 	bvec[0].bv_len = PAGE_SIZE;
 	bvec[0].bv_offset = 0;
 
@@ -335,7 +335,7 @@ int ploop_read_delta_metadata(struct ploop *ploop, struct file *file,
 	nr_segs = size / PAGE_SIZE;
 
 	ret = -ENOMEM;
-	if (prealloc_md_pages(md_root, 1, delta_nr_be))
+	if (ploop_prealloc_md_pages(md_root, 1, delta_nr_be))
 		goto out;
 
 	bvec = kvmalloc(sizeof(*bvec) * nr_segs, GFP_KERNEL);
@@ -364,12 +364,13 @@ int ploop_read_delta_metadata(struct ploop *ploop, struct file *file,
 
 	file_size = i_size_read(file->f_mapping->host);
 
-	ret = convert_bat_entries(ploop, md_root, delta_nr_be, nr_segs, file_size);
+	ret = ploop_convert_bat_entries(ploop, md_root, delta_nr_be, nr_segs,
+					file_size);
 
 	*delta_nr_be_ret = delta_nr_be;
 out:
 	if (ret)
-		free_md_pages_tree(md_root);
+		ploop_free_md_pages_tree(md_root);
 	if (bvec != &bvec_on_stack)
 		kvfree(bvec);
 	return ret;
@@ -389,8 +390,9 @@ static void ploop_set_not_hole(struct ploop *ploop, u32 dst_clu)
  * ...
  * n)add oldest delta
  */
-static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas,
-				 u32 level, struct rb_root *md_root, u64 nr_be)
+static void ploop_apply_delta_mappings(struct ploop *ploop,
+	struct ploop_delta *deltas, u32 level, struct rb_root *md_root,
+	u64 nr_be)
 {
 	map_index_t *bat_entries, *d_bat_entries = NULL;
 	bool is_top_level, is_raw, stop = false;
@@ -399,10 +401,10 @@ static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas
 	struct rb_node *node;
 
 	is_raw = deltas[level].is_raw;
-	is_top_level = (level == top_level(ploop));
+	is_top_level = (level == ploop_top_level(ploop));
 
 	if (!is_raw)
-		d_md = md_first_entry(md_root);
+		d_md = ploop_md_first_entry(md_root);
 
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
@@ -419,7 +421,7 @@ static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas
 		init_be_iter(nr_be, md->id, &i, &end);
 
 		for (; i <= end; i++) {
-			clu = page_clu_idx_to_bat_clu(md->id, i);
+			clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
 			if (clu == nr_be - 1)
 				stop = true;
 
@@ -449,7 +451,7 @@ static void apply_delta_mappings(struct ploop *ploop, struct ploop_delta *deltas
 		if (stop)
 			break;
 		if (!is_raw)
-			d_md = md_next_entry(d_md);
+			d_md = ploop_md_next_entry(d_md);
 	}
 	write_unlock_irq(&ploop->bat_rwlock);
 }
@@ -489,10 +491,10 @@ int ploop_add_delta(struct ploop *ploop, u32 level, struct file *file, bool is_r
 	}
 
 	ret = -EBADSLT; /* Lower delta can't be bigger then upper */
-	if (level != top_level(ploop) && nr_be > deltas[level + 1].nr_be)
+	if (level != ploop_top_level(ploop) && nr_be > deltas[level + 1].nr_be)
 		goto out;
 
-	apply_delta_mappings(ploop, deltas, level, &md_root, nr_be);
+	ploop_apply_delta_mappings(ploop, deltas, level, &md_root, nr_be);
 
 	deltas[level].file = file;
 	deltas[level].file_size = file_size;
@@ -501,6 +503,6 @@ int ploop_add_delta(struct ploop *ploop, u32 level, struct file *file, bool is_r
 	deltas[level].is_raw = is_raw;
 	ret = 0;
 out:
-	free_md_pages_tree(&md_root);
+	ploop_free_md_pages_tree(&md_root);
 	return ret;
 }
diff --git a/drivers/md/dm-ploop-cmd.c b/drivers/md/dm-ploop-cmd.c
index 429a03cce392..2faa6d500983 100644
--- a/drivers/md/dm-ploop-cmd.c
+++ b/drivers/md/dm-ploop-cmd.c
@@ -44,7 +44,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
 		ploop_init_be_iter(ploop, md->id, &i, &end);
 		bat_entries = kmap_atomic(md->page);
 		for (; i <= end; i++) {
-			if (!md_page_cluster_is_in_top_delta(ploop, md, i))
+			if (!ploop_md_page_cluster_is_in_top_delta(ploop, md, i))
 				continue;
 			dst_clu = bat_entries[i];
 			/* This may happen after grow->shrink->(now) grow */
@@ -58,7 +58,7 @@ static void ploop_advance_holes_bitmap(struct ploop *ploop,
 	write_unlock_irq(&ploop->bat_rwlock);
 }
 
-static int wait_for_completion_maybe_killable(struct completion *comp,
+static int ploop_wait_for_completion_maybe_killable(struct completion *comp,
 					      bool killable)
 {
 	int ret = 0;
@@ -95,7 +95,7 @@ static int ploop_inflight_bios_ref_switch(struct ploop *ploop, bool killable)
 
 	if (ploop->inflight_ref_comp_pending) {
 		/* Previous completion was interrupted */
-		ret = wait_for_completion_maybe_killable(comp, killable);
+		ret = ploop_wait_for_completion_maybe_killable(comp, killable);
 		if (ret)
 			return ret;
 		ploop->inflight_ref_comp_pending = false;
@@ -110,7 +110,7 @@ static int ploop_inflight_bios_ref_switch(struct ploop *ploop, bool killable)
 
 	percpu_ref_kill(&ploop->inflight_bios_ref[index]);
 
-	ret = wait_for_completion_maybe_killable(comp, killable);
+	ret = ploop_wait_for_completion_maybe_killable(comp, killable);
 	if (ret) {
 		ploop->inflight_ref_comp_pending = true;
 		return ret;
@@ -130,7 +130,7 @@ static void ploop_resume_submitting_pios(struct ploop *ploop)
 	list_splice_tail_init(&ploop->suspended_pios, &list);
 	spin_unlock_irq(&ploop->deferred_lock);
 
-	submit_embedded_pios(ploop, &list);
+	ploop_submit_embedded_pios(ploop, &list);
 }
 
 static int ploop_suspend_submitting_pios(struct ploop *ploop)
@@ -162,8 +162,8 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 dst_clu, bool *is_locke
 		for (; i <= end; i++) {
 			if (bat_entries[i] != dst_clu)
 				continue;
-			if (md_page_cluster_is_in_top_delta(ploop, md, i)) {
-				clu = page_clu_idx_to_bat_clu(md->id, i);
+			if (ploop_md_page_cluster_is_in_top_delta(ploop, md, i)) {
+				clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
 				break;
 			}
 		}
@@ -176,16 +176,16 @@ static u32 ploop_find_bat_entry(struct ploop *ploop, u32 dst_clu, bool *is_locke
 	*is_locked = false;
 	if (clu != UINT_MAX) {
 		spin_lock_irq(&ploop->deferred_lock);
-		*is_locked = find_lk_of_cluster(ploop, clu);
+		*is_locked = ploop_find_lk_of_cluster(ploop, clu);
 		spin_unlock_irq(&ploop->deferred_lock);
 	}
 
 	return clu;
 }
 
-void pio_prepare_offsets(struct ploop *ploop, struct pio *pio, u32 clu)
+void ploop_pio_prepare_offsets(struct ploop *ploop, struct pio *pio, u32 clu)
 {
-	int i, nr_pages = nr_pages_in_cluster(ploop);
+	int i, nr_pages = ploop_nr_pages_in_cluster(ploop);
 
 	pio->bi_iter.bi_idx = 0;
 	pio->bi_iter.bi_bvec_done = 0;
@@ -199,7 +199,8 @@ void pio_prepare_offsets(struct ploop *ploop, struct pio *pio, u32 clu)
 	pio->bi_iter.bi_size = CLU_SIZE(ploop);
 }
 
-static void wake_completion(struct pio *pio, void *data, blk_status_t status)
+static void ploop_wake_completion(struct pio *pio, void *data,
+				  blk_status_t status)
 {
 	struct completion *completion = data;
 
@@ -211,13 +212,13 @@ static int ploop_read_cluster_sync(struct ploop *ploop, struct pio *pio,
 {
 	DECLARE_COMPLETION_ONSTACK(completion);
 
-	init_pio(ploop, REQ_OP_READ, pio);
-	pio_prepare_offsets(ploop, pio, dst_clu);
+	ploop_init_pio(ploop, REQ_OP_READ, pio);
+	ploop_pio_prepare_offsets(ploop, pio, dst_clu);
 
-	pio->endio_cb = wake_completion;
+	pio->endio_cb = ploop_wake_completion;
 	pio->endio_cb_data = &completion;
 
-	map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
+	ploop_map_and_submit_rw(ploop, dst_clu, pio, ploop_top_level(ploop));
 	wait_for_completion(&completion);
 
 	if (pio->bi_status)
@@ -229,7 +230,7 @@ static int ploop_read_cluster_sync(struct ploop *ploop, struct pio *pio,
 static int ploop_write_cluster_sync(struct ploop *ploop, struct pio *pio,
 				    u32 dst_clu)
 {
-	struct file *file = top_delta(ploop)->file;
+	struct file *file = ploop_top_delta(ploop)->file;
 	DECLARE_COMPLETION_ONSTACK(completion);
 	int ret;
 
@@ -237,13 +238,13 @@ static int ploop_write_cluster_sync(struct ploop *ploop, struct pio *pio,
 	if (ret)
 		return ret;
 
-	init_pio(ploop, REQ_OP_WRITE, pio);
-	pio_prepare_offsets(ploop, pio, dst_clu);
+	ploop_init_pio(ploop, REQ_OP_WRITE, pio);
+	ploop_pio_prepare_offsets(ploop, pio, dst_clu);
 
-	pio->endio_cb = wake_completion;
+	pio->endio_cb = ploop_wake_completion;
 	pio->endio_cb_data = &completion;
 
-	map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
+	ploop_map_and_submit_rw(ploop, dst_clu, pio, ploop_top_level(ploop));
 	wait_for_completion(&completion);
 
 	if (pio->bi_status)
@@ -340,7 +341,8 @@ static int ploop_grow_relocate_cluster(struct ploop *ploop,
 
 	/* Update local BAT copy */
 	write_lock_irq(&ploop->bat_rwlock);
-	WARN_ON(!try_update_bat_entry(ploop, clu, top_level(ploop), new_dst));
+	WARN_ON(!ploop_try_update_bat_entry(ploop, clu,
+			ploop_top_level(ploop), new_dst));
 	write_unlock_irq(&ploop->bat_rwlock);
 not_occupied:
 	/*
@@ -419,7 +421,7 @@ static void ploop_add_md_pages(struct ploop *ploop, struct rb_root *from)
         while ((node = from->rb_node) != NULL) {
 		md = rb_entry(node, struct md_page, node);
 		rb_erase(node, from);
-		md_page_insert(ploop, md);
+		ploop_md_page_insert(ploop, md);
 	}
 }
 /*
@@ -428,7 +430,7 @@ static void ploop_add_md_pages(struct ploop *ploop, struct rb_root *from)
  * so be careful(!) and protective. Update indexes only after clu
  * data is written to disk.
  */
-static int process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
+static int ploop_process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
 {
 	u32 dst_clu;
 	int ret = 0;
@@ -464,9 +466,9 @@ static int process_resize_cmd(struct ploop *ploop, struct ploop_cmd *cmd)
 	return ret;
 }
 
-struct pio *alloc_pio_with_pages(struct ploop *ploop)
+struct pio *ploop_alloc_pio_with_pages(struct ploop *ploop)
 {
-	int i, nr_pages = nr_pages_in_cluster(ploop);
+	int i, nr_pages = ploop_nr_pages_in_cluster(ploop);
 	struct pio *pio;
 	u32 size;
 
@@ -495,7 +497,7 @@ struct pio *alloc_pio_with_pages(struct ploop *ploop)
 	return NULL;
 }
 
-void free_pio_with_pages(struct ploop *ploop, struct pio *pio)
+void ploop_free_pio_with_pages(struct ploop *ploop, struct pio *pio)
 {
 	int i, nr_pages = pio->bi_vcnt;
 	struct page *page;
@@ -504,7 +506,7 @@ void free_pio_with_pages(struct ploop *ploop, struct pio *pio)
 	 * Not a error for this function, but the rest of code
 	 * may expect this. Sanity check.
 	 */
-	WARN_ON_ONCE(nr_pages != nr_pages_in_cluster(ploop));
+	WARN_ON_ONCE(nr_pages != ploop_nr_pages_in_cluster(ploop));
 
 	for (i = 0; i < nr_pages; i++) {
 		page = pio->bi_io_vec[i].bv_page;
@@ -531,7 +533,7 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
 	if (ploop_is_ro(ploop))
 		return -EROFS;
 
-	md0 = md_page_find(ploop, 0);
+	md0 = ploop_md_page_find(ploop, 0);
 	if (WARN_ON(!md0))
 		return -EIO;
 	hdr = kmap(md0->page);
@@ -554,7 +556,7 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
 	nr_bat_entries = SEC_TO_CLU(ploop, new_sectors);
 
 	/* Memory for new md pages */
-	if (prealloc_md_pages(&cmd.resize.md_pages_root,
+	if (ploop_prealloc_md_pages(&cmd.resize.md_pages_root,
 			      ploop->nr_bat_entries, nr_bat_entries) < 0)
 		goto err;
 
@@ -575,7 +577,7 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
 	old_size = DIV_ROUND_UP(ploop->hb_nr, 8);
 	memset(cmd.resize.holes_bitmap + old_size, 0xff, size - old_size);
 
-	cmd.resize.pio = alloc_pio_with_pages(ploop);
+	cmd.resize.pio = ploop_alloc_pio_with_pages(ploop);
 	if (!cmd.resize.pio)
 		goto err;
 
@@ -589,13 +591,13 @@ static int ploop_resize(struct ploop *ploop, sector_t new_sectors)
 	cmd.resize.md0 = md0;
 
 	ploop_suspend_submitting_pios(ploop);
-	ret = process_resize_cmd(ploop, &cmd);
+	ret = ploop_process_resize_cmd(ploop, &cmd);
 	ploop_resume_submitting_pios(ploop);
 err:
 	if (cmd.resize.pio)
-		free_pio_with_pages(ploop, cmd.resize.pio);
+		ploop_free_pio_with_pages(ploop, cmd.resize.pio);
 	kvfree(cmd.resize.holes_bitmap);
-	free_md_pages_tree(&cmd.resize.md_pages_root);
+	ploop_free_md_pages_tree(&cmd.resize.md_pages_root);
 	return ret;
 }
 static void service_pio_endio(struct pio *pio, void *data, blk_status_t status)
@@ -614,7 +616,7 @@ static void service_pio_endio(struct pio *pio, void *data, blk_status_t status)
 		wake_up(&ploop->service_wq);
 }
 
-static int process_merge_latest_snapshot(struct ploop *ploop)
+static int ploop_process_merge_latest_snapshot(struct ploop *ploop)
 {
 	static blk_status_t service_status;
 	struct bio_vec bvec = {0};
@@ -627,12 +629,12 @@ static int process_merge_latest_snapshot(struct ploop *ploop)
 			ret = -EINTR;
 			break;
 		}
-		pio = alloc_pio(ploop, GFP_KERNEL);
+		pio = ploop_alloc_pio(ploop, GFP_KERNEL);
 		if (!pio) {
 			ret = -ENOMEM;
 			break;
 		}
-		init_pio(ploop, REQ_OP_WRITE, pio);
+		ploop_init_pio(ploop, REQ_OP_WRITE, pio);
 		pio->free_on_endio = true;
 		pio->bi_io_vec = &bvec;
 		pio->bi_iter.bi_sector = CLU_TO_SEC(ploop, clu);
@@ -642,9 +644,9 @@ static int process_merge_latest_snapshot(struct ploop *ploop)
 		pio->endio_cb = service_pio_endio;
 		pio->endio_cb_data = &service_status;
 		pio->is_fake_merge = true;
-		WARN_ON_ONCE(!fake_merge_pio(pio));
+		WARN_ON_ONCE(!ploop_fake_merge_pio(pio));
 
-		dispatch_pios(ploop, pio, NULL);
+		ploop_dispatch_pios(ploop, pio, NULL);
 
 		if (atomic_inc_return(&ploop->service_pios) == MERGE_PIOS_MAX) {
 			wait_event(ploop->service_wq,
@@ -678,7 +680,7 @@ static int ploop_merge_latest_snapshot(struct ploop *ploop)
 	if (ploop->nr_deltas < 2)
 		return -ENOENT;
 
-	ret = process_merge_latest_snapshot(ploop);
+	ret = ploop_process_merge_latest_snapshot(ploop);
 	if (ret)
 		goto out;
 
@@ -711,7 +713,7 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 	bool stop = false;
 	u32 clu;
 
-	d_md = md_first_entry(md_root);
+	d_md = ploop_md_first_entry(md_root);
 
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
@@ -719,7 +721,7 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 		bat_entries = kmap_atomic(md->page);
 		d_bat_entries = kmap_atomic(d_md->page);
 		for (; i <= end; i++) {
-			clu = page_clu_idx_to_bat_clu(md->id, i);
+			clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
 			if (clu == nr_be - 1)
 				stop = true;
 
@@ -754,7 +756,7 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 		kunmap_atomic(d_bat_entries);
 		if (stop)
 			break;
-		d_md = md_next_entry(d_md);
+		d_md = ploop_md_next_entry(d_md);
 	}
 
 	file = ploop->deltas[level].file;
@@ -767,8 +769,8 @@ static void notify_delta_merged(struct ploop *ploop, u8 level,
 	fput(file);
 }
 
-static int process_update_delta_index(struct ploop *ploop, u8 level,
-				      const char *map)
+static int ploop_process_update_delta_index(struct ploop *ploop, u8 level,
+					    const char *map)
 {
 	struct ploop_delta *delta = &ploop->deltas[level];
 	u32 clu, dst_clu, n;
@@ -794,7 +796,7 @@ static int process_update_delta_index(struct ploop *ploop, u8 level,
 	}
 	/* Commit all */
 	while (sscanf(map, "%u:%u;%n", &clu, &dst_clu, &n) == 2) {
-		try_update_bat_entry(ploop, clu, level, dst_clu);
+		ploop_try_update_bat_entry(ploop, clu, level, dst_clu);
 		map += n;
 	}
 	ret = 0;
@@ -827,7 +829,7 @@ static int ploop_delta_clusters_merged(struct ploop *ploop, u8 level,
 		goto out;
 
 	ret = -EFBIG;
-	if (changed_level != top_level(ploop) &&
+	if (changed_level != ploop_top_level(ploop) &&
 	    nr_be > deltas[changed_level + 1].nr_be)
 		goto out;
 
@@ -844,7 +846,7 @@ static int ploop_delta_clusters_merged(struct ploop *ploop, u8 level,
 	ploop_resume_submitting_pios(ploop);
 	ret = 0;
 out:
-	free_md_pages_tree(&md_root);
+	ploop_free_md_pages_tree(&md_root);
 	return ret;
 }
 
@@ -852,13 +854,13 @@ static int ploop_notify_merged(struct ploop *ploop, u8 level, bool forward)
 {
 	if (ploop->maintaince)
 		return -EBUSY;
-	if (level >= top_level(ploop))
+	if (level >= ploop_top_level(ploop))
 		return -ENOENT;
 	if (level == 0 && !forward)
 		return -EINVAL;
 	if (level == 0 && ploop->deltas[0].is_raw)
 		return -ENOTSUPP;
-	if (level == top_level(ploop) - 1 && forward)
+	if (level == ploop_top_level(ploop) - 1 && forward)
 		return -EINVAL;
 	if (ploop->nr_deltas < 3)
 		return -EINVAL;
@@ -923,7 +925,7 @@ static int ploop_update_delta_index(struct ploop *ploop, unsigned int level,
 
 	if (ploop->maintaince)
 		return -EBUSY;
-	if (level >= top_level(ploop))
+	if (level >= ploop_top_level(ploop))
 		return -ENOENT;
 
 	ret = ploop_suspend_submitting_pios(ploop);
@@ -938,7 +940,7 @@ static int ploop_update_delta_index(struct ploop *ploop, unsigned int level,
 	delta->file_size = file_size;
 	delta->file_preallocated_area_start = file_size;
 
-	ret = process_update_delta_index(ploop, level, map);
+	ret = ploop_process_update_delta_index(ploop, level, map);
 
 resume:
 	ploop_resume_submitting_pios(ploop);
@@ -950,7 +952,7 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 {
 	u32 i, end, bat_clusters, hb_nr, *bat_entries;
 	void *holes_bitmap = ploop->holes_bitmap;
-	u8 level = top_level(ploop) - 1;
+	u8 level = ploop_top_level(ploop) - 1;
 	struct rb_node *node;
 	struct md_page *md;
 	u64 size;
@@ -975,9 +977,9 @@ static int process_flip_upper_deltas(struct ploop *ploop)
 			if (bat_entries[i] == BAT_ENTRY_NONE)
 				continue;
 			if (md->bat_levels[i] == level) {
-				md->bat_levels[i] = top_level(ploop);
+				md->bat_levels[i] = ploop_top_level(ploop);
 				clear_bit(bat_entries[i], holes_bitmap);
-			} else if (md->bat_levels[i] == top_level(ploop)) {
+			} else if (md->bat_levels[i] == ploop_top_level(ploop)) {
 				md->bat_levels[i] = level;
 			}
 		}
@@ -1033,14 +1035,14 @@ static int ploop_check_delta_before_flip(struct ploop *ploop, struct file *file)
 	}
 
 	/* Points to hdr since md_page[0] also contains hdr. */
-	d_md = md_first_entry(&md_root);
+	d_md = ploop_md_first_entry(&md_root);
 
 	write_lock_irq(&ploop->bat_rwlock);
 	ploop_for_each_md_page(ploop, md, node) {
 		init_be_iter(nr_be, md->id, &i, &end);
 		d_bat_entries = kmap_atomic(d_md->page);
 		for (; i <= end; i++) {
-			if (md_page_cluster_is_in_top_delta(ploop, md, i) &&
+			if (ploop_md_page_cluster_is_in_top_delta(ploop, md, i) &&
 			    d_bat_entries[i] != BAT_ENTRY_NONE) {
 				ret = -EEXIST;
 				stop = true;
@@ -1048,7 +1050,7 @@ static int ploop_check_delta_before_flip(struct ploop *ploop, struct file *file)
 			}
 		}
 
-		clu = page_clu_idx_to_bat_clu(md->id, i);
+		clu = ploop_page_clu_idx_to_bat_clu(md->id, i);
 		if (clu == nr_be - 1) {
 			stop = true;
 			goto unmap;
@@ -1057,11 +1059,11 @@ static int ploop_check_delta_before_flip(struct ploop *ploop, struct file *file)
 		kunmap_atomic(d_bat_entries);
 		if (stop)
 			break;
-		d_md = md_next_entry(d_md);
+		d_md = ploop_md_next_entry(d_md);
 	}
 
 	write_unlock_irq(&ploop->bat_rwlock);
-	free_md_pages_tree(&md_root);
+	ploop_free_md_pages_tree(&md_root);
 out:
 #endif
 	return ret;
diff --git a/drivers/md/dm-ploop-map.c b/drivers/md/dm-ploop-map.c
index 0c18cca6e14d..f302e778e002 100644
--- a/drivers/md/dm-ploop-map.c
+++ b/drivers/md/dm-ploop-map.c
@@ -21,19 +21,20 @@
 
 #define PREALLOC_SIZE (128ULL * 1024 * 1024)
 
-static void handle_cleanup(struct ploop *ploop, struct pio *pio);
-static void prq_endio(struct pio *pio, void *prq_ptr, blk_status_t bi_status);
+static void ploop_handle_cleanup(struct ploop *ploop, struct pio *pio);
+static void ploop_prq_endio(struct pio *pio, void *prq_ptr,
+			    blk_status_t bi_status);
 
 #define DM_MSG_PREFIX "ploop"
 
-static unsigned int pio_nr_segs(struct pio *pio)
+static unsigned int ploop_pio_nr_segs(struct pio *pio)
 {
 	struct bvec_iter bi = {
 		.bi_size = pio->bi_iter.bi_size,
 		.bi_bvec_done = pio->bi_iter.bi_bvec_done,
 		.bi_idx = pio->bi_iter.bi_idx,
 	};
-        unsigned int nr_segs = 0;
+	unsigned int nr_segs = 0;
 	struct bio_vec bv;
 
 	for_each_bvec(bv, pio->bi_io_vec, bi, bi)
@@ -65,7 +66,7 @@ void ploop_index_wb_init(struct ploop_index_wb *piwb, struct ploop *ploop)
 	piwb->type = PIWB_TYPE_ALLOC;
 }
 
-void init_pio(struct ploop *ploop, unsigned int bi_op, struct pio *pio)
+void ploop_init_pio(struct ploop *ploop, unsigned int bi_op, struct pio *pio)
 {
 	pio->ploop = ploop;
 	pio->css = NULL;
@@ -111,7 +112,7 @@ static int ploop_rq_valid(struct ploop *ploop, struct request *rq)
 	return 0;
 }
 
-static void init_prq(struct ploop_rq *prq, struct request *rq)
+static void ploop_init_prq(struct ploop_rq *prq, struct request *rq)
 {
 	prq->rq = rq;
 	prq->bvec = NULL;
@@ -124,14 +125,14 @@ static void init_prq(struct ploop_rq *prq, struct request *rq)
 #endif
 }
 
-static void init_prq_and_embedded_pio(struct ploop *ploop, struct request *rq,
-				      struct ploop_rq *prq, struct pio *pio)
+static void ploop_init_prq_and_embedded_pio(struct ploop *ploop,
+		struct request *rq, struct ploop_rq *prq, struct pio *pio)
 {
-	init_prq(prq, rq);
-	init_pio(ploop, req_op(rq), pio);
+	ploop_init_prq(prq, rq);
+	ploop_init_pio(ploop, req_op(rq), pio);
 	pio->css = prq->css;
 
-	pio->endio_cb = prq_endio;
+	pio->endio_cb = ploop_prq_endio;
 	pio->endio_cb_data = prq;
 }
 
@@ -145,10 +146,10 @@ void ploop_enospc_timer(struct timer_list *timer)
 	list_splice_init(&ploop->enospc_pios, &list);
 	spin_unlock_irqrestore(&ploop->deferred_lock, flags);
 
-	submit_embedded_pios(ploop, &list);
+	ploop_submit_embedded_pios(ploop, &list);
 }
 
-void ploop_event_work(struct work_struct *ws)
+void do_ploop_event_work(struct work_struct *ws)
 {
 	struct ploop *ploop = container_of(ws, struct ploop, event_work);
 
@@ -167,7 +168,7 @@ static bool ploop_try_delay_enospc(struct ploop_rq *prq, struct pio *pio)
 		goto unlock;
 	}
 
-	init_prq_and_embedded_pio(ploop, prq->rq, prq, pio);
+	ploop_init_prq_and_embedded_pio(ploop, prq->rq, prq, pio);
 
 	pr_err_once("ploop: underlying disk is almost full\n");
 	ploop->event_enospc = true;
@@ -182,10 +183,11 @@ static bool ploop_try_delay_enospc(struct ploop_rq *prq, struct pio *pio)
 	return delayed;
 }
 
-static void prq_endio(struct pio *pio, void *prq_ptr, blk_status_t bi_status)
+static void ploop_prq_endio(struct pio *pio, void *prq_ptr,
+			    blk_status_t bi_status)
 {
-        struct ploop_rq *prq = prq_ptr;
-        struct request *rq = prq->rq;
+	struct ploop_rq *prq = prq_ptr;
+	struct request *rq = prq->rq;
 
 	if (prq->bvec)
 		kfree(prq->bvec);
@@ -208,52 +210,52 @@ static void prq_endio(struct pio *pio, void *prq_ptr, blk_status_t bi_status)
 	dm_complete_request(rq, bi_status);
 }
 
-static void do_pio_endio(struct pio *pio)
+static void ploop_do_pio_endio(struct pio *pio)
 {
 	ploop_endio_t endio_cb = pio->endio_cb;
 	void *endio_cb_data = pio->endio_cb_data;
 	bool free_on_endio = pio->free_on_endio;
 
-        if (!atomic_dec_and_test(&pio->remaining))
-                return;
+	if (!atomic_dec_and_test(&pio->remaining))
+		return;
 
 	endio_cb(pio, endio_cb_data, pio->bi_status);
 
 	if (free_on_endio)
-		free_pio(pio->ploop, pio);
+		ploop_free_pio(pio->ploop, pio);
 }
 
-void pio_endio(struct pio *pio)
+void ploop_pio_endio(struct pio *pio)
 {
 	struct ploop *ploop = pio->ploop;
 
-	handle_cleanup(ploop, pio);
+	ploop_handle_cleanup(ploop, pio);
 
-	do_pio_endio(pio);
+	ploop_do_pio_endio(pio);
 }
 
-static void pio_chain_endio(struct pio *pio, void *parent_ptr,
-			    blk_status_t bi_status)
+static void ploop_pio_chain_endio(struct pio *pio, void *parent_ptr,
+				  blk_status_t bi_status)
 {
-        struct pio *parent = parent_ptr;
+	struct pio *parent = parent_ptr;
 
-        if (unlikely(bi_status))
-                parent->bi_status = bi_status;
+	if (unlikely(bi_status))
+		parent->bi_status = bi_status;
 
-        do_pio_endio(parent);
+	ploop_do_pio_endio(parent);
 }
 
-static void pio_chain(struct pio *pio, struct pio *parent)
+static void ploop_pio_chain(struct pio *pio, struct pio *parent)
 {
 	BUG_ON(pio->endio_cb_data || pio->endio_cb);
 
 	pio->endio_cb_data = parent;
-	pio->endio_cb = pio_chain_endio;
+	pio->endio_cb = ploop_pio_chain_endio;
 	atomic_inc(&parent->remaining);
 }
 
 /* Clone of bio_advance_iter() */
-static void pio_advance(struct pio *pio, unsigned int bytes)
+static void ploop_pio_advance(struct pio *pio, unsigned int bytes)
 {
 	struct bvec_iter *iter = &pio->bi_iter;
 
@@ -265,16 +267,16 @@ static void pio_advance(struct pio *pio, unsigned int bytes)
 		bvec_iter_advance(pio->bi_io_vec, iter, bytes);
 }
 
-static struct pio * split_and_chain_pio(struct ploop *ploop,
+static struct pio *ploop_split_and_chain_pio(struct ploop *ploop,
 		struct pio *pio, u32 len)
 {
 	struct pio *split;
 
-	split = alloc_pio(ploop, GFP_NOIO);
+	split = ploop_alloc_pio(ploop, GFP_NOIO);
 	if (!split)
 		return NULL;
 
-	init_pio(ploop, pio->bi_op, split);
+	ploop_init_pio(ploop, pio->bi_op, split);
 	split->css = pio->css;
 	split->queue_list_id = pio->queue_list_id;
 	split->free_on_endio = true;
@@ -283,13 +285,13 @@ static struct pio * split_and_chain_pio(struct ploop *ploop,
 	split->bi_iter.bi_size = len;
 	split->endio_cb = NULL;
 	split->endio_cb_data = NULL;
-	pio_chain(split, pio);
+	ploop_pio_chain(split, pio);
 	if (len)
-		pio_advance(pio, len);
+		ploop_pio_advance(pio, len);
 	return split;
 }
 
-static int split_pio_to_list(struct ploop *ploop, struct pio *pio,
+static int ploop_split_pio_to_list(struct ploop *ploop, struct pio *pio,
 			     struct list_head *ret_list)
 {
 	u32 clu_size = CLU_SIZE(ploop);
@@ -308,7 +310,7 @@ static int split_pio_to_list(struct ploop *ploop, struct pio *pio,
 		end = round_up(start + 1, clu_size);
 		len = end - start;
 
-		split = split_and_chain_pio(ploop, pio, len);
+		split = ploop_split_and_chain_pio(ploop, pio, len);
 		if (!split)
 			goto err;
 
@@ -319,15 +321,15 @@ static int split_pio_to_list(struct ploop *ploop, struct pio *pio,
 	list_add_tail(&pio->list, ret_list);
 	return 0;
 err:
-	while ((pio = pio_list_pop(&list)) != NULL) {
+	while ((pio = ploop_pio_list_pop(&list)) != NULL) {
 		pio->bi_status = BLK_STS_RESOURCE;
-		pio_endio(pio);
+		ploop_pio_endio(pio);
 	}
 	return -ENOMEM;
 }
 
-static void dispatch_pio(struct ploop *ploop, struct pio *pio,
-			 bool *is_data, bool *is_flush)
+static void ploop_dispatch_pio(struct ploop *ploop, struct pio *pio,
+			       bool *is_data, bool *is_flush)
 {
 	struct list_head *list = &ploop->pios[pio->queue_list_id];
 
@@ -342,17 +344,18 @@ static void dispatch_pio(struct ploop *ploop, struct pio *pio,
 	list_add_tail(&pio->list, list);
 }
 
-void dispatch_pios(struct ploop *ploop, struct pio *pio, struct list_head *pio_list)
+void ploop_dispatch_pios(struct ploop *ploop, struct pio *pio,
+			 struct list_head *pio_list)
 {
 	bool is_data = false, is_flush = false;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ploop->deferred_lock, flags);
 	if (pio)
-		dispatch_pio(ploop, pio, &is_data, &is_flush);
+		ploop_dispatch_pio(ploop, pio, &is_data, &is_flush);
 	if (pio_list) {
-		while ((pio = pio_list_pop(pio_list)) != NULL)
-			dispatch_pio(ploop, pio, &is_data, &is_flush);
+		while ((pio = ploop_pio_list_pop(pio_list)) != NULL)
+			ploop_dispatch_pio(ploop, pio, &is_data, &is_flush);
 	}
 	spin_unlock_irqrestore(&ploop->deferred_lock, flags);
 
@@ -362,8 +365,8 @@ void dispatch_pios(struct ploop *ploop, struct pio *pio, struct list_head *pio_l
 		queue_work(ploop->wq, &ploop->fsync_worker);
 }
 
-static bool delay_if_md_busy(struct ploop *ploop, struct md_page *md,
-			     enum piwb_type type, struct pio *pio)
+static bool ploop_delay_if_md_busy(struct ploop *ploop, struct md_page *md,
+				   enum piwb_type type, struct pio *pio)
 {
 	struct ploop_index_wb *piwb;
 	unsigned long flags;
@@ -382,14 +385,14 @@ static bool delay_if_md_busy(struct ploop *ploop, struct md_page *md,
 	return busy;
 }
 
-static void queue_discard_index_wb(struct ploop *ploop, struct pio *pio)
+static void ploop_queue_discard_index_wb(struct ploop *ploop, struct pio *pio)
 {
 	pio->queue_list_id = PLOOP_LIST_DISCARD;
-	dispatch_pios(ploop, pio, NULL);
+	ploop_dispatch_pios(ploop, pio, NULL);
 }
 
 /* Zero @count bytes of @qio->bi_io_vec since @from byte */
-static void zero_fill_pio(struct pio *pio)
+static void ploop_zero_fill_pio(struct pio *pio)
 {
 	struct bvec_iter bi = {
 		.bi_size = pio->bi_iter.bi_size,
@@ -408,7 +411,7 @@ static void zero_fill_pio(struct pio *pio)
 	}
 }
 
-struct pio *find_pio(struct hlist_head head[], u32 clu)
+struct pio *ploop_find_pio(struct hlist_head head[], u32 clu)
 {
 	struct hlist_head *slot = ploop_htable_slot(head, clu);
 	struct pio *pio;
@@ -423,24 +426,24 @@ struct pio *find_pio(struct hlist_head head[], u32 clu)
 	return NULL;
 }
 
-static struct pio *find_inflight_bio(struct ploop *ploop, u32 clu)
+static struct pio *ploop_find_inflight_bio(struct ploop *ploop, u32 clu)
 {
 	lockdep_assert_held(&ploop->inflight_lock);
-	return find_pio(ploop->inflight_pios, clu);
+	return ploop_find_pio(ploop->inflight_pios, clu);
 }
 
-struct pio *find_lk_of_cluster(struct ploop *ploop, u32 clu)
+struct pio *ploop_find_lk_of_cluster(struct ploop *ploop, u32 clu)
 {
 	lockdep_assert_held(&ploop->deferred_lock);
-	return find_pio(ploop->exclusive_pios, clu);
+	return ploop_find_pio(ploop->exclusive_pios, clu);
 }
 
-static void add_endio_pio(struct pio *head, struct pio *pio)
+static void ploop_add_endio_pio(struct pio *head, struct pio *pio)
 {
 	list_add_tail(&pio->list, &head->endio_list);
 }
 
-static void inc_nr_inflight(struct ploop *ploop, struct pio *pio)
+static void ploop_inc_nr_inflight(struct ploop *ploop, struct pio *pio)
 {
 	unsigned char ref_index = ploop->inflight_bios_ref_index;
 
@@ -450,7 +453,7 @@ static void inc_nr_inflight(struct ploop *ploop, struct pio *pio)
 	}
 }
 
-static void dec_nr_inflight(struct ploop *ploop, struct pio *pio)
+static void ploop_dec_nr_inflight(struct ploop *ploop, struct pio *pio)
 {
 	if (pio->ref_index != PLOOP_REF_INDEX_INVALID) {
 		percpu_ref_put(&ploop->inflight_bios_ref[pio->ref_index]);
@@ -458,13 +461,13 @@ static void dec_nr_inflight(struct ploop *ploop, struct pio *pio)
 	}
 }
 
-static void link_pio(struct hlist_head head[], struct pio *pio,
+static void ploop_link_pio(struct hlist_head head[], struct pio *pio,
 		     u32 clu, bool exclusive)
 {
 	struct hlist_head *slot = ploop_htable_slot(head, clu);
 
 	if (exclusive)
-		WARN_ON_ONCE(find_pio(head, clu) != NULL);
+		WARN_ON_ONCE(ploop_find_pio(head, clu) != NULL);
 
 	BUG_ON(!hlist_unhashed(&pio->hlist_node));
 	hlist_add_head(&pio->hlist_node, slot);
@@ -476,7 +479,7 @@ static void link_pio(struct hlist_head head[], struct pio *pio,
  * or from exclusive_bios_rbtree. BIOs from endio_list are requeued
  * to deferred_list.
  */
-static void unlink_pio(struct ploop *ploop, struct pio *pio,
+static void ploop_unlink_pio(struct ploop *ploop, struct pio *pio,
 		       struct list_head *pio_list)
 {
 	BUG_ON(hlist_unhashed(&pio->hlist_node));
@@ -485,36 +488,39 @@ static void unlink_pio(struct ploop *ploop, struct pio *pio,
 	list_splice_tail_init(&pio->endio_list, pio_list);
 }
 
-static void add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 clu)
+static void ploop_add_cluster_lk(struct ploop *ploop, struct pio *pio, u32 clu)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&ploop->deferred_lock, flags);
-	link_pio(ploop->exclusive_pios, pio, clu, true);
+	ploop_link_pio(ploop->exclusive_pios, pio, clu, true);
 	spin_unlock_irqrestore(&ploop->deferred_lock, flags);
 }
-static void del_cluster_lk(struct ploop *ploop, struct pio *pio)
+
+static void ploop_del_cluster_lk(struct ploop *ploop, struct pio *pio)
 {
 	LIST_HEAD(pio_list);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ploop->deferred_lock, flags);
-	unlink_pio(ploop, pio, &pio_list);
+	ploop_unlink_pio(ploop, pio, &pio_list);
 	spin_unlock_irqrestore(&ploop->deferred_lock, flags);
 
 	if (!list_empty(&pio_list))
-		dispatch_pios(ploop, NULL, &pio_list);
+		ploop_dispatch_pios(ploop, NULL, &pio_list);
 }
 
-static void link_submitting_pio(struct ploop *ploop, struct pio *pio, u32 clu)
+static void ploop_link_submitting_pio(struct ploop *ploop, struct pio *pio,
+				      u32 clu)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&ploop->inflight_lock, flags);
-	link_pio(ploop->inflight_pios, pio, clu, false);
+	ploop_link_pio(ploop->inflight_pios, pio, clu, false);
 	spin_unlock_irqrestore(&ploop->inflight_lock, flags);
 }
-static void unlink_completed_pio(struct ploop *ploop, struct pio *pio)
+
+static void ploop_unlink_completed_pio(struct ploop *ploop, struct pio *pio)
 {
 	LIST_HEAD(pio_list);
 	unsigned long flags;
@@ -523,11 +529,11 @@ static void unlink_completed_pio(struct ploop *ploop, struct pio *pio)
 		return;
 
 	spin_lock_irqsave(&ploop->inflight_lock, flags);
-	unlink_pio(ploop, pio, &pio_list);
+	ploop_unlink_pio(ploop, pio, &pio_list);
 	spin_unlock_irqrestore(&ploop->inflight_lock, flags);
 
 	if (!list_empty(&pio_list))
-		dispatch_pios(ploop, NULL, &pio_list);
+		ploop_dispatch_pios(ploop, NULL, &pio_list);
 }
 
 static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
@@ -537,17 +543,17 @@ static bool ploop_md_make_dirty(struct ploop *ploop, struct md_page *md)
 
 	write_lock_irqsave(&ploop->bat_rwlock, flags);
 	WARN_ON_ONCE((md->status & MD_WRITEBACK));
-        if (!(md->status & MD_DIRTY)) {
-                md->status |= MD_DIRTY;
-                list_add_tail(&md->wb_link, &ploop->wb_batch_list);
-                new = true;
+	if (!(md->status & MD_DIRTY)) {
+		md->status |= MD_DIRTY;
+		list_add_tail(&md->wb_link, &ploop->wb_batch_list);
+		new = true;
 	}
 	write_unlock_irqrestore(&ploop->bat_rwlock, flags);
 
 	return new;
 }
 
-static bool pio_endio_if_all_zeros(struct pio *pio)
+static bool ploop_pio_endio_if_all_zeros(struct pio *pio)
 {
 	struct bvec_iter bi = {
 		.bi_size = pio->bi_iter.bi_size,
@@ -567,31 +573,31 @@ static bool pio_endio_if_all_zeros(struct pio *pio)
 			return false;
 	}
 
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 	return true;
 }
 
-static bool pio_endio_if_merge_fake_pio(struct pio *pio)
+static bool ploop_pio_endio_if_merge_fake_pio(struct pio *pio)
 {
-	if (likely(!fake_merge_pio(pio)))
+	if (likely(!ploop_fake_merge_pio(pio)))
 		return false;
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 	return true;
 }
 
-static int punch_hole(struct file *file, loff_t pos, loff_t len)
+static int ploop_punch_hole(struct file *file, loff_t pos, loff_t len)
 {
 	return vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE,
 			     pos, len);
 }
 
-static int zero_range(struct file *file, loff_t pos, loff_t len)
+static int ploop_zero_range(struct file *file, loff_t pos, loff_t len)
 {
 	return vfs_fallocate(file, FALLOC_FL_ZERO_RANGE|FALLOC_FL_KEEP_SIZE,
 			     pos, len);
 }
 
-static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
+static void ploop_handle_discard_pio(struct ploop *ploop, struct pio *pio,
 			       u32 clu, u32 dst_clu)
 {
 	struct pio *inflight_h;
@@ -599,7 +605,7 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
 	loff_t pos;
 	int ret;
 
-	if (!whole_cluster(ploop, pio)) {
+	if (!ploop_whole_cluster(ploop, pio)) {
 		/*
 		 * Despite discard_granularity is given, block level
 		 * may submit shorter reqs. E.g., these are boundary
@@ -607,12 +613,12 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
 		 * it's OK to just ignore such reqs. Keep in mind
 		 * this implementing REQ_OP_WRITE_ZEROES etc.
 		 */
-		pio_endio(pio);
+		ploop_pio_endio(pio);
 		return;
 	}
 
-	if (!cluster_is_in_top_delta(ploop, clu)) {
-		pio_endio(pio);
+	if (!ploop_cluster_is_in_top_delta(ploop, clu)) {
+		ploop_pio_endio(pio);
 		return;
 	}
 
@@ -621,9 +627,9 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
 		goto punch_hole;
 
 	spin_lock_irqsave(&ploop->inflight_lock, flags);
-	inflight_h = find_inflight_bio(ploop, clu);
+	inflight_h = ploop_find_inflight_bio(ploop, clu);
 	if (inflight_h)
-		add_endio_pio(inflight_h, pio);
+		ploop_add_endio_pio(inflight_h, pio);
 	spin_unlock_irqrestore(&ploop->inflight_lock, flags);
 
 	if (inflight_h) {
@@ -632,42 +638,43 @@ static void handle_discard_pio(struct ploop *ploop, struct pio *pio,
 		return;
 	}
 
-	add_cluster_lk(ploop, pio, clu);
+	ploop_add_cluster_lk(ploop, pio, clu);
 	pio->wants_discard_index_cleanup = true;
 
 punch_hole:
-	remap_to_cluster(ploop, pio, dst_clu);
+	ploop_remap_to_cluster(ploop, pio, dst_clu);
 	pos = to_bytes(pio->bi_iter.bi_sector);
-	ret = punch_hole(top_delta(ploop)->file, pos, pio->bi_iter.bi_size);
+	ret = ploop_punch_hole(ploop_top_delta(ploop)->file, pos,
+			       pio->bi_iter.bi_size);
 	if (ret || ploop->nr_deltas != 1) {
 		if (ret)
 			pio->bi_status = errno_to_blk_status(ret);
-		pio_endio(pio);
+		ploop_pio_endio(pio);
 		return;
 	}
 
-	queue_discard_index_wb(ploop, pio);
+	ploop_queue_discard_index_wb(ploop, pio);
 }
 
 static void ploop_discard_index_pio_end(struct ploop *ploop, struct pio *pio)
 {
-	del_cluster_lk(ploop, pio);
+	ploop_del_cluster_lk(ploop, pio);
 }
 
-static void queue_or_fail(struct ploop *ploop, int err, void *data)
+static void ploop_queue_or_fail(struct ploop *ploop, int err, void *data)
 {
 	struct pio *pio = data;
 
 	/* FIXME: do we use BLK_STS_AGAIN? */
 	if (err && err != BLK_STS_AGAIN) {
 		pio->bi_status = errno_to_blk_status(err);
-		pio_endio(pio);
+		ploop_pio_endio(pio);
 	} else {
-		dispatch_pios(ploop, pio, NULL);
+		ploop_dispatch_pios(ploop, pio, NULL);
 	}
 }
 
-static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
+static void ploop_complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 {
 	struct pio *aux_pio = cow->aux_pio;
 	struct ploop *ploop = cow->ploop;
@@ -678,7 +685,7 @@ static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 	WARN_ON_ONCE(!list_empty(&aux_pio->list));
 	cow_pio = cow->cow_pio;
 
-	del_cluster_lk(ploop, cow_pio);
+	ploop_del_cluster_lk(ploop, cow_pio);
 
 	if (dst_clu != BAT_ENTRY_NONE && bi_status != BLK_STS_OK) {
 		read_lock_irqsave(&ploop->bat_rwlock, flags);
@@ -686,10 +693,10 @@ static void complete_cow(struct ploop_cow *cow, blk_status_t bi_status)
 		read_unlock_irqrestore(&ploop->bat_rwlock, flags);
 	}
 
-	queue_or_fail(ploop, blk_status_to_errno(bi_status), cow_pio);
+	ploop_queue_or_fail(ploop, blk_status_to_errno(bi_status), cow_pio);
 
 	queue_work(ploop->wq, &ploop->worker);
-	free_pio_with_pages(ploop, cow->aux_pio);
+	ploop_free_pio_with_pages(ploop, cow->aux_pio);
 	kmem_cache_free(cow_cache, cow);
 }
 
@@ -700,11 +707,11 @@ static void ploop_release_cluster(struct ploop *ploop, u32 clu)
 
 	lockdep_assert_held(&ploop->bat_rwlock);
 
-	id = bat_clu_to_page_nr(clu);
-        md = md_page_find(ploop, id);
-        BUG_ON(!md);
+	id = ploop_bat_clu_to_page_nr(clu);
+	md = ploop_md_page_find(ploop, id);
+	BUG_ON(!md);
 
-	clu = bat_clu_idx_in_page(clu); /* relative to page */
+	clu = ploop_bat_clu_idx_in_page(clu); /* relative to page */
 
 	bat_entries = kmap_atomic(md->page);
 	dst_clu = bat_entries[clu];
@@ -715,13 +722,13 @@ static void ploop_release_cluster(struct ploop *ploop, u32 clu)
 	ploop_hole_set_bit(dst_clu, ploop);
 }
 
-static void piwb_discard_completed(struct ploop *ploop, bool success,
-				   u32 clu, u32 new_dst_clu)
+static void ploop_piwb_discard_completed(struct ploop *ploop,
+					 bool success, u32 clu, u32 new_dst_clu)
 {
 	if (new_dst_clu)
 		return;
 
-	if (cluster_is_in_top_delta(ploop, clu)) {
+	if (ploop_cluster_is_in_top_delta(ploop, clu)) {
 		WARN_ON_ONCE(ploop->nr_deltas != 1);
 		if (success)
 			ploop_release_cluster(ploop, clu);
@@ -764,21 +771,22 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 
 	for (; i < last; i++) {
 		if (piwb->type == PIWB_TYPE_DISCARD) {
-			piwb_discard_completed(ploop, success, i + off, dst_clu[i]);
+			ploop_piwb_discard_completed(ploop, success, i + off, dst_clu[i]);
 			continue;
 		}
 
 		if (!dst_clu[i])
 			continue;
 
-		if (cluster_is_in_top_delta(ploop, i + off) && piwb->type == PIWB_TYPE_ALLOC) {
+		if (ploop_cluster_is_in_top_delta(ploop, i + off)
+						&& piwb->type == PIWB_TYPE_ALLOC) {
 			WARN_ON(bat_entries[i] != dst_clu[i]);
 			continue;
 		}
 
 		if (success) {
 			bat_entries[i] = dst_clu[i];
-			md->bat_levels[i] = top_level(ploop);
+			md->bat_levels[i] = ploop_top_level(ploop);
 		} else {
 			ploop_hole_set_bit(i + off, ploop);
 		}
@@ -793,17 +801,17 @@ static void ploop_advance_local_after_bat_wb(struct ploop *ploop,
 	kunmap_atomic(bat_entries);
 
 	if (!list_empty(&list))
-		dispatch_pios(ploop, NULL, &list);
+		ploop_dispatch_pios(ploop, NULL, &list);
 }
 
-static void free_piwb(struct ploop_index_wb *piwb)
+static void ploop_free_piwb(struct ploop_index_wb *piwb)
 {
-	free_pio(piwb->ploop, piwb->pio);
+	ploop_free_pio(piwb->ploop, piwb->pio);
 	put_page(piwb->bat_page);
 	kfree(piwb);
 }
 
-static void put_piwb(struct ploop_index_wb *piwb)
+static void ploop_put_piwb(struct ploop_index_wb *piwb)
 {
 	if (atomic_dec_and_test(&piwb->count)) {
 		struct ploop *ploop = piwb->ploop;
@@ -819,7 +827,7 @@ static void put_piwb(struct ploop_index_wb *piwb)
 				*piwb->comp_bi_status = piwb->bi_status;
 			complete(piwb->comp);
 		}
-		free_piwb(piwb);
+		ploop_free_piwb(piwb);
 	}
 }
 
@@ -854,22 +862,22 @@ static void ploop_bat_write_complete(struct ploop_index_wb *piwb,
 	 * End pending data bios. Unlocked, as nobody can
 	 * add a new element after piwc->completed is true.
 	 */
-	while ((data_pio = pio_list_pop(&piwb->ready_data_pios)) != NULL) {
+	while ((data_pio = ploop_pio_list_pop(&piwb->ready_data_pios)) != NULL) {
 		if (bi_status)
 			data_pio->bi_status = bi_status;
-		pio_endio(data_pio);
+		ploop_pio_endio(data_pio);
 	}
 
-	while ((aux_pio = pio_list_pop(&piwb->cow_list))) {
+	while ((aux_pio = ploop_pio_list_pop(&piwb->cow_list))) {
 		cow = aux_pio->endio_cb_data;
-		complete_cow(cow, bi_status);
+		ploop_complete_cow(cow, bi_status);
 	}
 
 	/*
 	 * In case of update BAT is failed, dst_clusters will be
 	 * set back to holes_bitmap on last put_piwb().
 	 */
-	put_piwb(piwb);
+	ploop_put_piwb(piwb);
 }
 
 static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
@@ -889,10 +897,10 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 	ploop_index_wb_init(piwb, ploop);
 
 	piwb->bat_page = page = alloc_page(GFP_NOIO);
-	piwb->pio = pio = alloc_pio(ploop, GFP_NOIO);
+	piwb->pio = pio = ploop_alloc_pio(ploop, GFP_NOIO);
 	if (!page || !pio)
 		goto err;
-	init_pio(ploop, REQ_OP_WRITE, pio);
+	ploop_init_pio(ploop, REQ_OP_WRITE, pio);
 
 	bat_entries = kmap_atomic(md->page);
 
@@ -921,7 +929,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 
 	/* Copy BAT (BAT goes right after hdr, see .ctr) */
 	for (; i < last; i++) {
-		if (cluster_is_in_top_delta(ploop, i + off))
+		if (ploop_cluster_is_in_top_delta(ploop, i + off))
 			continue;
 		to[i] = 0;
 	}
@@ -937,7 +945,7 @@ static int ploop_prepare_bat_update(struct ploop *ploop, struct md_page *md,
 	piwb->type = type;
 	return 0;
 err:
-	free_piwb(piwb);
+	ploop_free_piwb(piwb);
 	return -ENOMEM;
 }
 
@@ -951,7 +959,7 @@ void ploop_break_bat_update(struct ploop *ploop, struct md_page *md)
 	md->piwb = NULL;
 	write_unlock_irqrestore(&ploop->bat_rwlock, flags);
 
-	free_piwb(piwb);
+	ploop_free_piwb(piwb);
 }
 
 static void ploop_bat_page_zero_cluster(struct ploop *ploop,
@@ -961,15 +969,14 @@ static void ploop_bat_page_zero_cluster(struct ploop *ploop,
 	map_index_t *to;
 
 	/* Cluster index related to the page[page_id] start */
-	clu = bat_clu_idx_in_page(clu);
+	clu = ploop_bat_clu_idx_in_page(clu);
 
 	to = kmap_atomic(piwb->bat_page);
 	to[clu] = 0;
 	kunmap_atomic(to);
 }
 
-static int find_dst_clu_bit(struct ploop *ploop,
-		      u32 *ret_dst_clu)
+static int ploop_find_dst_clu_bit(struct ploop *ploop, u32 *ret_dst_clu)
 {
 	u32 dst_clu;
 
@@ -981,8 +988,9 @@ static int find_dst_clu_bit(struct ploop *ploop,
 	return 0;
 }
 
-static int truncate_prealloc_safe(struct ploop *ploop, struct ploop_delta *delta,
-				  loff_t len, const char *func)
+static int ploop_truncate_prealloc_safe(struct ploop *ploop,
+					struct ploop_delta *delta,
+					loff_t len, const char *func)
 {
 	struct file *file = delta->file;
 	loff_t old_len = delta->file_size;
@@ -1013,15 +1021,15 @@ static int truncate_prealloc_safe(struct ploop *ploop, struct ploop_delta *delta
 	return 0;
 }
 
-static int allocate_cluster(struct ploop *ploop, u32 *dst_clu)
+static int ploop_allocate_cluster(struct ploop *ploop, u32 *dst_clu)
 {
-	struct ploop_delta *top = top_delta(ploop);
+	struct ploop_delta *top = ploop_top_delta(ploop);
 	u32 clu_size = CLU_SIZE(ploop);
 	loff_t off, pos, end, old_size;
 	struct file *file = top->file;
 	int ret;
 
-	if (find_dst_clu_bit(ploop, dst_clu) < 0)
+	if (ploop_find_dst_clu_bit(ploop, dst_clu) < 0)
 		return -EIO;
 
 	pos = CLU_TO_POS(ploop, *dst_clu);
@@ -1032,9 +1040,9 @@ static int allocate_cluster(struct ploop *ploop, u32 *dst_clu)
 		/* Clu at @pos may contain dirty data */
 		off = min_t(loff_t, old_size, end);
 		if (!ploop->falloc_new_clu)
-			ret = punch_hole(file, pos, off - pos);
+			ret = ploop_punch_hole(file, pos, off - pos);
 		else
-			ret = zero_range(file, pos, off - pos);
+			ret = ploop_zero_range(file, pos, off - pos);
 		if (ret) {
 			pr_err("ploop: punch/zero area: %d\n", ret);
 			return ret;
@@ -1042,7 +1050,7 @@ static int allocate_cluster(struct ploop *ploop, u32 *dst_clu)
 	}
 
 	if (end > old_size) {
-		ret = truncate_prealloc_safe(ploop, top, end, __func__);
+		ret = ploop_truncate_prealloc_safe(ploop, top, end, __func__);
 		if (ret)
 			return ret;
 	} else if (pos < top->file_preallocated_area_start) {
@@ -1095,7 +1103,7 @@ static int ploop_alloc_cluster(struct ploop *ploop, struct ploop_index_wb *piwb,
 	if (already_alloced)
 		goto out;
 
-	ret = allocate_cluster(ploop, dst_clu);
+	ret = ploop_allocate_cluster(ploop, dst_clu);
 	if (ret < 0)
 		goto out;
 
@@ -1120,7 +1128,7 @@ static bool ploop_data_pio_end(struct pio *pio)
 		pio->bi_status = piwb->bi_status;
 	spin_unlock_irqrestore(&piwb->lock, flags);
 
-	put_piwb(piwb);
+	ploop_put_piwb(piwb);
 
 	return completed;
 }
@@ -1147,7 +1155,7 @@ static void ploop_queue_resubmit(struct pio *pio)
 	queue_work(ploop->wq, &ploop->worker);
 }
 
-static void data_rw_complete(struct pio *pio)
+static void ploop_data_rw_complete(struct pio *pio)
 {
 	bool completed;
 
@@ -1155,7 +1163,7 @@ static void data_rw_complete(struct pio *pio)
 		if (pio->ret >= 0) {
 			/* Partial IO */
 			WARN_ON_ONCE(pio->ret == 0);
-			pio_advance(pio, pio->ret);
+			ploop_pio_advance(pio, pio->ret);
 			ploop_queue_resubmit(pio);
 			return;
 		}
@@ -1168,7 +1176,7 @@ static void data_rw_complete(struct pio *pio)
 			return;
 	}
 
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 }
 
 /*
@@ -1176,7 +1184,7 @@ static void data_rw_complete(struct pio *pio)
  * Don't use this function from fsync kwork in case of the caller blocks
  * to wait for completion, since kwork is who resubmits after partial IO.
  */
-static void submit_rw_mapped(struct ploop *ploop, struct pio *pio)
+static void ploop_submit_rw_mapped(struct ploop *ploop, struct pio *pio)
 {
 	unsigned int rw, nr_segs;
 	struct bio_vec *bvec;
@@ -1184,12 +1192,12 @@ static void submit_rw_mapped(struct ploop *ploop, struct pio *pio)
 	struct file *file;
 	loff_t pos;
 
-	BUG_ON(pio->level > top_level(ploop));
+	BUG_ON(pio->level > ploop_top_level(ploop));
 
-	pio->complete = data_rw_complete;
+	pio->complete = ploop_data_rw_complete;
 
 	rw = (op_is_write(pio->bi_op) ? WRITE : READ);
-	nr_segs = pio_nr_segs(pio);
+	nr_segs = ploop_pio_nr_segs(pio);
 	bvec = __bvec_iter_bvec(pio->bi_io_vec, pio->bi_iter);
 
 	iov_iter_bvec(&iter, rw, bvec, nr_segs, pio->bi_iter.bi_size);
@@ -1208,63 +1216,66 @@ static void submit_rw_mapped(struct ploop *ploop, struct pio *pio)
 		ploop_call_rw_iter(file, pos, rw, &iter, pio);
 }
 
-void map_and_submit_rw(struct ploop *ploop, u32 dst_clu, struct pio *pio, u8 level)
+void ploop_map_and_submit_rw(struct ploop *ploop, u32 dst_clu,
+			     struct pio *pio, u8 level)
 {
-	remap_to_cluster(ploop, pio, dst_clu);
+	ploop_remap_to_cluster(ploop, pio, dst_clu);
 	pio->level = level;
 
-	submit_rw_mapped(ploop, pio);
+	ploop_submit_rw_mapped(ploop, pio);
 }
 
-static void initiate_delta_read(struct ploop *ploop, unsigned int level,
-				u32 dst_clu, struct pio *pio)
+static void ploop_initiate_delta_read(struct ploop *ploop, unsigned int level,
+				      u32 dst_clu, struct pio *pio)
 {
 	if (dst_clu == BAT_ENTRY_NONE) {
 		/* No one delta contains dst_clu. */
-		zero_fill_pio(pio);
-		pio_endio(pio);
+		ploop_zero_fill_pio(pio);
+		ploop_pio_endio(pio);
 		return;
 	}
 
-	map_and_submit_rw(ploop, dst_clu, pio, level);
+	ploop_map_and_submit_rw(ploop, dst_clu, pio, level);
 }
 
-static void ploop_cow_endio(struct pio *aux_pio, void *data, blk_status_t bi_status)
+static void ploop_cow_endio(struct pio *aux_pio, void *data,
+			    blk_status_t bi_status)
 {
 	struct ploop_cow *cow = data;
 	struct ploop *ploop = cow->ploop;
 
 	aux_pio->queue_list_id = PLOOP_LIST_COW;
-	dispatch_pios(ploop, aux_pio, NULL);
+	ploop_dispatch_pios(ploop, aux_pio, NULL);
 }
 
-static bool postpone_if_cluster_locked(struct ploop *ploop, struct pio *pio, u32 clu)
+static bool ploop_postpone_if_cluster_locked(struct ploop *ploop,
+					     struct pio *pio, u32 clu)
 {
 	struct pio *e_h; /* Exclusively locked */
 
 	spin_lock_irq(&ploop->deferred_lock);
-	e_h = find_lk_of_cluster(ploop, clu);
+	e_h = ploop_find_lk_of_cluster(ploop, clu);
 	if (e_h)
-		add_endio_pio(e_h, pio);
+		ploop_add_endio_pio(e_h, pio);
 	spin_unlock_irq(&ploop->deferred_lock);
 
 	return e_h != NULL;
 }
 
-static int submit_cluster_cow(struct ploop *ploop, unsigned int level,
+static int ploop_submit_cluster_cow(struct ploop *ploop, unsigned int level,
 			      u32 clu, u32 dst_clu, struct pio *cow_pio)
 {
 	struct ploop_cow *cow = NULL;
 	struct pio *aux_pio = NULL;
 
 	/* Prepare new delta read */
-	aux_pio = alloc_pio_with_pages(ploop);
+	aux_pio = ploop_alloc_pio_with_pages(ploop);
 	cow = kmem_cache_alloc(cow_cache, GFP_NOIO);
 	if (!aux_pio || !cow)
 		goto err;
-	init_pio(ploop, REQ_OP_READ, aux_pio);
+	ploop_init_pio(ploop, REQ_OP_READ, aux_pio);
+	ploop_pio_prepare_offsets(ploop, aux_pio, clu);
 	aux_pio->css = cow_pio->css;
-	pio_prepare_offsets(ploop, aux_pio, clu);
 	aux_pio->endio_cb = ploop_cow_endio;
 	aux_pio->endio_cb_data = cow;
 
@@ -1273,55 +1284,56 @@ static int submit_cluster_cow(struct ploop *ploop, unsigned int level,
 	cow->aux_pio = aux_pio;
 	cow->cow_pio = cow_pio;
 
-	add_cluster_lk(ploop, cow_pio, clu);
+	ploop_add_cluster_lk(ploop, cow_pio, clu);
 
 	/* Stage #0: read secondary delta full clu */
-	map_and_submit_rw(ploop, dst_clu, aux_pio, level);
+	ploop_map_and_submit_rw(ploop, dst_clu, aux_pio, level);
 	return 0;
 err:
 	if (aux_pio)
-		free_pio_with_pages(ploop, aux_pio);
+		ploop_free_pio_with_pages(ploop, aux_pio);
 	kfree(cow);
 	return -ENOMEM;
 }
 
-static void initiate_cluster_cow(struct ploop *ploop, unsigned int level,
-				 u32 clu, u32 dst_clu, struct pio *pio)
+static void ploop_initiate_cluster_cow(struct ploop *ploop, unsigned int level,
+				       u32 clu, u32 dst_clu, struct pio *pio)
 {
-	if (!submit_cluster_cow(ploop, level, clu, dst_clu, pio))
+	if (!ploop_submit_cluster_cow(ploop, level, clu, dst_clu, pio))
 		return;
 
 	pio->bi_status = BLK_STS_RESOURCE;
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 }
 
-static void submit_cluster_write(struct ploop_cow *cow)
+static void ploop_submit_cluster_write(struct ploop_cow *cow)
 {
 	struct pio *aux_pio = cow->aux_pio;
 	struct ploop *ploop = cow->ploop;
 	u32 dst_clu;
 	int ret;
 
-	ret = allocate_cluster(ploop, &dst_clu);
+	ret = ploop_allocate_cluster(ploop, &dst_clu);
 	if (unlikely(ret < 0))
 		goto error;
 	cow->dst_clu = dst_clu;
 
-	init_pio(ploop, REQ_OP_WRITE, aux_pio);
+	ploop_init_pio(ploop, REQ_OP_WRITE, aux_pio);
 	aux_pio->css = cow->cow_pio->css;
-	pio_prepare_offsets(ploop, aux_pio, dst_clu);
+	ploop_pio_prepare_offsets(ploop, aux_pio, dst_clu);
 
 	BUG_ON(irqs_disabled());
 	aux_pio->endio_cb = ploop_cow_endio;
 	aux_pio->endio_cb_data = cow;
 
-	map_and_submit_rw(ploop, dst_clu, aux_pio, top_level(ploop));
+	ploop_map_and_submit_rw(ploop, dst_clu, aux_pio,
+				ploop_top_level(ploop));
 	return;
 error:
-	complete_cow(cow, errno_to_blk_status(ret));
+	ploop_complete_cow(cow, errno_to_blk_status(ret));
 }
 
-static void submit_cow_index_wb(struct ploop_cow *cow)
+static void ploop_submit_cow_index_wb(struct ploop_cow *cow)
 {
 	struct pio *cow_pio = cow->cow_pio;
 	struct ploop *ploop = cow->ploop;
@@ -1331,10 +1343,10 @@ static void submit_cow_index_wb(struct ploop_cow *cow)
 	map_index_t *to;
 
 	WARN_ON_ONCE(cow->aux_pio->queue_list_id != PLOOP_LIST_COW);
-	page_id = bat_clu_to_page_nr(clu);
-	md = md_page_find(ploop, page_id);
+	page_id = ploop_bat_clu_to_page_nr(clu);
+	md = ploop_md_page_find(ploop, page_id);
 
-	if (delay_if_md_busy(ploop, md, PIWB_TYPE_ALLOC, cow->aux_pio))
+	if (ploop_delay_if_md_busy(ploop, md, PIWB_TYPE_ALLOC, cow->aux_pio))
 		goto out;
 
 	if (!(md->status & MD_DIRTY)) {
@@ -1361,10 +1373,11 @@ static void submit_cow_index_wb(struct ploop_cow *cow)
 out:
 	return;
 err_resource:
-	complete_cow(cow, BLK_STS_RESOURCE);
+	ploop_complete_cow(cow, BLK_STS_RESOURCE);
 }
 
-static void process_delta_cow(struct ploop *ploop, struct list_head *cow_list)
+static void ploop_process_delta_cow(struct ploop *ploop,
+				    struct list_head *cow_list)
 {
 	struct ploop_cow *cow;
 	struct pio *aux_pio;
@@ -1372,10 +1385,10 @@ static void process_delta_cow(struct ploop *ploop, struct list_head *cow_list)
 	if (list_empty(cow_list))
 		return;
 
-	while ((aux_pio = pio_list_pop(cow_list)) != NULL) {
+	while ((aux_pio = ploop_pio_list_pop(cow_list)) != NULL) {
 		cow = aux_pio->endio_cb_data;
 		if (unlikely(aux_pio->bi_status != BLK_STS_OK)) {
-			complete_cow(cow, aux_pio->bi_status);
+			ploop_complete_cow(cow, aux_pio->bi_status);
 			continue;
 		}
 
@@ -1384,13 +1397,13 @@ static void process_delta_cow(struct ploop *ploop, struct list_head *cow_list)
 			 * Stage #1: assign dst_clu and write data
 			 * to top delta.
 			 */
-			submit_cluster_write(cow);
+			ploop_submit_cluster_write(cow);
 		} else {
 			/*
 			 * Stage #2: data is written to top delta.
 			 * Update index.
 			 */
-			submit_cow_index_wb(cow);
+			ploop_submit_cow_index_wb(cow);
 		}
 	}
 }
@@ -1409,10 +1422,10 @@ static void process_delta_cow(struct ploop *ploop, struct list_head *cow_list)
  * Note: clu newer becomes locked here, since index update is called
  * synchronously. Keep in mind this in case you make it async.
  */
-static bool locate_new_cluster_and_attach_pio(struct ploop *ploop,
-					      struct md_page *md,
-					      u32 clu, u32 *dst_clu,
-					      struct pio *pio)
+static bool ploop_locate_new_cluster_and_attach_pio(struct ploop *ploop,
+						    struct md_page *md,
+						    u32 clu, u32 *dst_clu,
+						    struct pio *pio)
 {
 	bool bat_update_prepared = false;
 	struct ploop_index_wb *piwb;
@@ -1421,12 +1434,12 @@ static bool locate_new_cluster_and_attach_pio(struct ploop *ploop,
 	int err;
 
 	WARN_ON_ONCE(pio->queue_list_id != PLOOP_LIST_DEFERRED);
-	if (delay_if_md_busy(ploop, md, PIWB_TYPE_ALLOC, pio))
+	if (ploop_delay_if_md_busy(ploop, md, PIWB_TYPE_ALLOC, pio))
 		goto out;
 
 	if (!(md->status & MD_DIRTY)) {
 		 /* Unlocked since MD_DIRTY is set and cleared from this work */
-		page_id = bat_clu_to_page_nr(clu);
+		page_id = ploop_bat_clu_to_page_nr(clu);
 		if (ploop_prepare_bat_update(ploop, md, PIWB_TYPE_ALLOC) < 0) {
 			pio->bi_status = BLK_STS_RESOURCE;
 			goto error;
@@ -1453,11 +1466,11 @@ static bool locate_new_cluster_and_attach_pio(struct ploop *ploop,
 	/* Uninit piwb */
 	if (bat_update_prepared)
 		ploop_break_bat_update(ploop, md);
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 	return false;
 }
 
-static int process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
+static int ploop_process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
 {
 	sector_t sector = pio->bi_iter.bi_sector;
 	struct md_page *md;
@@ -1466,19 +1479,19 @@ static int process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
 	bool ret;
 
 	clu = SEC_TO_CLU(ploop, sector);
-	if (postpone_if_cluster_locked(ploop, pio, clu))
+	if (ploop_postpone_if_cluster_locked(ploop, pio, clu))
 		goto out;
 
 	dst_clu = ploop_bat_entries(ploop, clu, &level, &md);
 	if (op_is_discard(pio->bi_op)) {
 		/* FIXME: check there is no parallel alloc */
-		handle_discard_pio(ploop, pio, clu, dst_clu);
+		ploop_handle_discard_pio(ploop, pio, clu, dst_clu);
 		goto out;
 	}
 
-	if (cluster_is_in_top_delta(ploop, clu)) {
+	if (ploop_cluster_is_in_top_delta(ploop, clu)) {
 		/* Already mapped */
-		if (pio_endio_if_merge_fake_pio(pio))
+		if (ploop_pio_endio_if_merge_fake_pio(pio))
 			goto out;
 		goto queue;
 	} else if (!op_is_write(pio->bi_op)) {
@@ -1486,7 +1499,7 @@ static int process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
 		 * Simple read from secondary delta. May fail.
 		 * (Also handles the case dst_clu == BAT_ENTRY_NONE).
 		 */
-		initiate_delta_read(ploop, level, dst_clu, pio);
+		ploop_initiate_delta_read(ploop, level, dst_clu, pio);
 		goto out;
 	} else if (dst_clu != BAT_ENTRY_NONE) {
 		/*
@@ -1495,46 +1508,49 @@ static int process_one_deferred_bio(struct ploop *ploop, struct pio *pio)
 		 * a lot of other corner cases, but we don't do that as
 		 * snapshots are used and COW occurs very rare.
 		 */
-		initiate_cluster_cow(ploop, level, clu, dst_clu, pio);
+		ploop_initiate_cluster_cow(ploop, level, clu, dst_clu, pio);
 		goto out;
 	}
 
-	if (unlikely(pio_endio_if_all_zeros(pio)))
+	if (unlikely(ploop_pio_endio_if_all_zeros(pio)))
 		goto out;
 
 	/* Cluster exists nowhere. Allocate it and setup pio as outrunning */
-	ret = locate_new_cluster_and_attach_pio(ploop, md, clu, &dst_clu, pio);
+	ret = ploop_locate_new_cluster_and_attach_pio(ploop, md, clu,
+						      &dst_clu, pio);
 	if (!ret)
 		goto out;
 queue:
-	link_submitting_pio(ploop, pio, clu);
+	ploop_link_submitting_pio(ploop, pio, clu);
 
-	map_and_submit_rw(ploop, dst_clu, pio, top_level(ploop));
+	ploop_map_and_submit_rw(ploop, dst_clu, pio, ploop_top_level(ploop));
 out:
 	return 0;
 }
 
-static void md_fsync_endio(struct pio *pio, void *piwb_ptr, blk_status_t bi_status)
+static void ploop_md_fsync_endio(struct pio *pio, void *piwb_ptr,
+				 blk_status_t bi_status)
 {
 	struct ploop_index_wb *piwb = piwb_ptr;
 
 	ploop_bat_write_complete(piwb, bi_status);
 }
 
-static void md_write_endio(struct pio *pio, void *piwb_ptr, blk_status_t bi_status)
+static void ploop_md_write_endio(struct pio *pio, void *piwb_ptr,
+				 blk_status_t bi_status)
 {
 	struct ploop_index_wb *piwb = piwb_ptr;
 	struct ploop *ploop = piwb->ploop;
 
 	if (bi_status) {
-		md_fsync_endio(pio, piwb, bi_status);
+		ploop_md_fsync_endio(pio, piwb, bi_status);
 	} else {
-		init_pio(ploop, REQ_OP_FLUSH, pio);
-		pio->endio_cb = md_fsync_endio;
+		ploop_init_pio(ploop, REQ_OP_FLUSH, pio);
+		pio->endio_cb = ploop_md_fsync_endio;
 		pio->endio_cb_data = piwb;
 
 		pio->queue_list_id = PLOOP_LIST_FLUSH;
-		dispatch_pios(ploop, pio, NULL);
+		ploop_dispatch_pios(ploop, pio, NULL);
 	}
 }
 
@@ -1553,14 +1569,14 @@ void ploop_index_wb_submit(struct ploop *ploop, struct ploop_index_wb *piwb)
 	pio->bi_iter.bi_idx = 0;
 	pio->bi_iter.bi_bvec_done = 0;
 	pio->bi_io_vec = bvec;
-	pio->level = top_level(ploop);
-	pio->endio_cb = md_write_endio;
+	pio->level = ploop_top_level(ploop);
+	pio->endio_cb = ploop_md_write_endio;
 	pio->endio_cb_data = piwb;
 
-	submit_rw_mapped(ploop, pio);
+	ploop_submit_rw_mapped(ploop, pio);
 }
 
-static struct bio_vec *create_bvec_from_rq(struct request *rq)
+static struct bio_vec *ploop_create_bvec_from_rq(struct request *rq)
 {
 	struct bio_vec bv, *bvec, *tmp;
 	struct req_iterator rq_iter;
@@ -1583,8 +1599,9 @@ static struct bio_vec *create_bvec_from_rq(struct request *rq)
 	return bvec;
 }
 
-static void prepare_one_embedded_pio(struct ploop *ploop, struct pio *pio,
-				     struct list_head *deferred_pios)
+static void ploop_prepare_one_embedded_pio(struct ploop *ploop,
+					   struct pio *pio,
+					   struct list_head *deferred_pios)
 {
 	struct ploop_rq *prq = pio->endio_cb_data;
 	struct request *rq = prq->rq;
@@ -1599,7 +1616,7 @@ static void prepare_one_embedded_pio(struct ploop *ploop, struct pio *pio,
 		 * Transform a set of bvec arrays related to bios
 		 * into a single bvec array (which we can iterate).
 		 */
-		bvec = create_bvec_from_rq(rq);
+		bvec = ploop_create_bvec_from_rq(rq);
 		if (!bvec)
 			goto err_nomem;
 		prq->bvec = bvec;
@@ -1617,34 +1634,36 @@ static void prepare_one_embedded_pio(struct ploop *ploop, struct pio *pio,
 	pio->bi_io_vec = bvec;
 
 	pio->queue_list_id = PLOOP_LIST_DEFERRED;
-	ret = split_pio_to_list(ploop, pio, deferred_pios);
+	ret = ploop_split_pio_to_list(ploop, pio, deferred_pios);
 	if (ret)
 		goto err_nomem;
 
 	return;
 err_nomem:
 	pio->bi_status = BLK_STS_RESOURCE;
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 }
 
-static void prepare_embedded_pios(struct ploop *ploop, struct list_head *pios,
-				  struct list_head *deferred_pios)
+static void ploop_prepare_embedded_pios(struct ploop *ploop,
+					struct list_head *pios,
+					struct list_head *deferred_pios)
 {
 	struct pio *pio;
 
-	while ((pio = pio_list_pop(pios)) != NULL)
-		prepare_one_embedded_pio(ploop, pio, deferred_pios);
+	while ((pio = ploop_pio_list_pop(pios)) != NULL)
+		ploop_prepare_one_embedded_pio(ploop, pio, deferred_pios);
 }
 
-static void process_deferred_pios(struct ploop *ploop, struct list_head *pios)
+static void ploop_process_deferred_pios(struct ploop *ploop,
+					struct list_head *pios)
 {
 	struct pio *pio;
 
-	while ((pio = pio_list_pop(pios)) != NULL)
-		process_one_deferred_bio(ploop, pio);
+	while ((pio = ploop_pio_list_pop(pios)) != NULL)
+		ploop_process_one_deferred_bio(ploop, pio);
 }
 
-static void process_one_discard_pio(struct ploop *ploop, struct pio *pio)
+static void ploop_process_one_discard_pio(struct ploop *ploop, struct pio *pio)
 {
 	bool bat_update_prepared = false;
 	u32 page_id, clu = pio->clu;
@@ -1655,9 +1674,9 @@ static void process_one_discard_pio(struct ploop *ploop, struct pio *pio)
 	WARN_ON(ploop->nr_deltas != 1 ||
 		pio->queue_list_id != PLOOP_LIST_DISCARD);
 
-	page_id = bat_clu_to_page_nr(clu);
-	md = md_page_find(ploop, page_id);
-	if (delay_if_md_busy(ploop, md, PIWB_TYPE_DISCARD, pio))
+	page_id = ploop_bat_clu_to_page_nr(clu);
+	md = ploop_md_page_find(ploop, page_id);
+	if (ploop_delay_if_md_busy(ploop, md, PIWB_TYPE_DISCARD, pio))
 		goto out;
 
 	if (!(md->status & MD_DIRTY)) {
@@ -1691,28 +1710,30 @@ static void process_one_discard_pio(struct ploop *ploop, struct pio *pio)
 err:
 	if (bat_update_prepared)
 		ploop_break_bat_update(ploop, md);
-	pio_endio(pio);
+	ploop_pio_endio(pio);
 }
 
-static void process_discard_pios(struct ploop *ploop, struct list_head *pios)
+static void ploop_process_discard_pios(struct ploop *ploop,
+				       struct list_head *pios)
 {
 	struct pio *pio;
 
-	while ((pio = pio_list_pop(pios)) != NULL)
-		process_one_discard_pio(ploop, pio);
+	while ((pio = ploop_pio_list_pop(pios)) != NULL)
+		ploop_process_one_discard_pio(ploop, pio);
 }
 
-static void process_resubmit_pios(struct ploop *ploop, struct list_head *pios)
+static void ploop_process_resubmit_pios(struct ploop *ploop,
+					struct list_head *pios)
 {
 	struct pio *pio;
 
-	while ((pio = pio_list_pop(pios)) != NULL) {
+	while ((pio = ploop_pio_list_pop(pios)) != NULL) {
 		pio->queue_list_id = PLOOP_LIST_INVALID;
-		submit_rw_mapped(ploop, pio);
+		ploop_submit_rw_mapped(ploop, pio);
 	}
 }
 
-static void submit_metadata_writeback(struct ploop *ploop)
+static void ploop_submit_metadata_writeback(struct ploop *ploop)
 {
 	struct md_page *md;
 
@@ -1756,14 +1777,14 @@ void do_ploop_work(struct work_struct *ws)
 	list_splice_init(&ploop->resubmit_pios, &resubmit_pios);
 	spin_unlock_irq(&ploop->deferred_lock);
 
-	prepare_embedded_pios(ploop, &embedded_pios, &deferred_pios);
+	ploop_prepare_embedded_pios(ploop, &embedded_pios, &deferred_pios);
 
-	process_resubmit_pios(ploop, &resubmit_pios);
-	process_deferred_pios(ploop, &deferred_pios);
-	process_discard_pios(ploop, &discard_pios);
-	process_delta_cow(ploop, &cow_pios);
+	ploop_process_resubmit_pios(ploop, &resubmit_pios);
+	ploop_process_deferred_pios(ploop, &deferred_pios);
+	ploop_process_discard_pios(ploop, &discard_pios);
+	ploop_process_delta_cow(ploop, &cow_pios);
 
-	submit_metadata_writeback(ploop);
+	ploop_submit_metadata_writeback(ploop);
 
 	current->flags = old_flags;
 }
@@ -1780,17 +1801,17 @@ void do_ploop_fsync_work(struct work_struct *ws)
 	list_splice_init(&ploop->pios[PLOOP_LIST_FLUSH], &flush_pios);
 	spin_unlock_irq(&ploop->deferred_lock);
 
-	file = top_delta(ploop)->file;
+	file = ploop_top_delta(ploop)->file;
 	ret = vfs_fsync(file, 0);
 
-	while ((pio = pio_list_pop(&flush_pios)) != NULL) {
+	while ((pio = ploop_pio_list_pop(&flush_pios)) != NULL) {
 		if (unlikely(ret))
 			pio->bi_status = errno_to_blk_status(ret);
-		pio_endio(pio);
+		ploop_pio_endio(pio);
 	}
 }
 
-static void submit_embedded_pio(struct ploop *ploop, struct pio *pio)
+static void ploop_submit_embedded_pio(struct ploop *ploop, struct pio *pio)
 {
 	struct ploop_rq *prq = pio->endio_cb_data;
 	struct request *rq = prq->rq;
@@ -1814,7 +1835,7 @@ static void submit_embedded_pio(struct ploop *ploop, struct pio *pio)
 		goto unlock;
 	}
 
-	inc_nr_inflight(ploop, pio);
+	ploop_inc_nr_inflight(ploop, pio);
 	list_add_tail(&pio->list, &ploop->pios[pio->queue_list_id]);
 unlock:
 	spin_unlock_irqrestore(&ploop->deferred_lock, flags);
@@ -1823,12 +1844,12 @@ static void submit_embedded_pio(struct ploop *ploop, struct pio *pio)
 		queue_work(ploop->wq, worker);
 }
 
-void submit_embedded_pios(struct ploop *ploop, struct list_head *list)
+void ploop_submit_embedded_pios(struct ploop *ploop, struct list_head *list)
 {
 	struct pio *pio;
 
-	while ((pio = pio_list_pop(list)) != NULL)
-		submit_embedded_pio(ploop, pio);
+	while ((pio = ploop_pio_list_pop(list)) != NULL)
+		ploop_submit_embedded_pio(ploop, pio);
 }
 
 int ploop_clone_and_map(struct dm_target *ti, struct request *rq,
@@ -1846,13 +1867,13 @@ int ploop_clone_and_map(struct dm_target *ti, struct request *rq,
 		return DM_MAPIO_KILL;
 	pio = (void *)prq + sizeof(*prq);
 
-	init_prq_and_embedded_pio(ploop, rq, prq, pio);
+	ploop_init_prq_and_embedded_pio(ploop, rq, prq, pio);
 
-	submit_embedded_pio(ploop, pio);
+	ploop_submit_embedded_pio(ploop, pio);
 	return DM_MAPIO_SUBMITTED;
 }
 
-static void handle_cleanup(struct ploop *ploop, struct pio *pio)
+static void ploop_handle_cleanup(struct ploop *ploop, struct pio *pio)
 {
 	/*
 	 * This function is called from the very beginning
@@ -1861,8 +1882,8 @@ static void handle_cleanup(struct ploop *ploop, struct pio *pio)
 	if (pio->wants_discard_index_cleanup)
 		ploop_discard_index_pio_end(ploop, pio);
 
-	unlink_completed_pio(ploop, pio);
-	dec_nr_inflight(ploop, pio);
+	ploop_unlink_completed_pio(ploop, pio);
+	ploop_dec_nr_inflight(ploop, pio);
 }
 
 /*
@@ -1877,8 +1898,8 @@ int ploop_prepare_reloc_index_wb(struct ploop *ploop,
 				 u32 clu, u32 *dst_clu)
 {
 	enum piwb_type type = PIWB_TYPE_ALLOC;
-	u32 page_id = bat_clu_to_page_nr(clu);
-	struct md_page *md = md_page_find(ploop, page_id);
+	u32 page_id = ploop_bat_clu_to_page_nr(clu);
+	struct md_page *md = ploop_md_page_find(ploop, page_id);
 	struct ploop_index_wb *piwb;
 	int err;
 
diff --git a/drivers/md/dm-ploop-target.c b/drivers/md/dm-ploop-target.c
index d7cc2fbba537..e670ea320bf8 100644
--- a/drivers/md/dm-ploop-target.c
+++ b/drivers/md/dm-ploop-target.c
@@ -105,21 +105,21 @@ int ploop_rw_page_sync(unsigned rw, struct file *file,
 	return ret;
 }
 
-static void inflight_bios_ref_exit0(struct percpu_ref *ref)
+static void ploop_inflight_bios_ref_exit0(struct percpu_ref *ref)
 {
 	struct ploop *ploop = container_of(ref, struct ploop,
 					   inflight_bios_ref[0]);
 	complete(&ploop->inflight_bios_ref_comp);
 }
 
-static void inflight_bios_ref_exit1(struct percpu_ref *ref)
+static void ploop_inflight_bios_ref_exit1(struct percpu_ref *ref)
 {
 	struct ploop *ploop = container_of(ref, struct ploop,
 					   inflight_bios_ref[1]);
 	complete(&ploop->inflight_bios_ref_comp);
 }
 
-void free_md_pages_tree(struct rb_root *root)
+void ploop_free_md_pages_tree(struct rb_root *root)
 {
 	struct rb_node *node;
 	struct md_page *md;
@@ -179,11 +179,11 @@ static void ploop_destroy(struct ploop *ploop)
 	mempool_destroy(ploop->prq_pool);
 	kfree(ploop->deltas);
 	kvfree(ploop->holes_bitmap);
-	free_md_pages_tree(&ploop->bat_entries);
+	ploop_free_md_pages_tree(&ploop->bat_entries);
 	kfree(ploop);
 }
 
-static struct file * get_delta_file(int fd)
+static struct file *ploop_get_delta_file(int fd)
 {
 	struct file *file;
 
@@ -198,7 +198,7 @@ static struct file * get_delta_file(int fd)
 	return file;
 }
 
-static int check_top_delta(struct ploop *ploop, struct file *file)
+static int ploop_check_top_delta(struct ploop *ploop, struct file *file)
 {
 	struct dm_target *ti = ploop->ti;
 	struct page *page = NULL;
@@ -226,7 +226,8 @@ static int check_top_delta(struct ploop *ploop, struct file *file)
 	if (ret)
 		goto out;
 
-	ret = prealloc_md_pages(&ploop->bat_entries, 0, ploop->nr_bat_entries);
+	ret = ploop_prealloc_md_pages(&ploop->bat_entries, 0,
+				      ploop->nr_bat_entries);
 	if (ret)
 		goto out;
 out:
@@ -269,14 +270,14 @@ static int ploop_add_deltas_stack(struct ploop *ploop, char **argv, int argc)
 		if (kstrtos32(arg, 10, &delta_fd) < 0)
 			goto out;
 
-		file = get_delta_file(delta_fd);
+		file = ploop_get_delta_file(delta_fd);
 		if (IS_ERR(file)) {
 			ret = PTR_ERR(file);
 			goto out;
 		}
 
 		if (i == argc - 1) { /* Top delta */
-			ret = check_top_delta(ploop, file);
+			ret = ploop_check_top_delta(ploop, file);
 			if (ret)
 				goto err_fput;
 		}
@@ -354,11 +355,12 @@ static int ploop_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	INIT_WORK(&ploop->worker, do_ploop_work);
 	INIT_WORK(&ploop->fsync_worker, do_ploop_fsync_work);
-	INIT_WORK(&ploop->event_work, ploop_event_work);
+	INIT_WORK(&ploop->event_work, do_ploop_event_work);
 	init_completion(&ploop->inflight_bios_ref_comp);
 
 	for (i = 0; i < 2; i++) {
-		release = i ? inflight_bios_ref_exit1 : inflight_bios_ref_exit0;
+		release = i ? ploop_inflight_bios_ref_exit1 :
+			      ploop_inflight_bios_ref_exit0;
 		if (percpu_ref_init(&ploop->inflight_bios_ref[i], release,
 				    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
 			ret = -ENOMEM;
diff --git a/drivers/md/dm-ploop.h b/drivers/md/dm-ploop.h
index 03e3ed749920..5d953278a976 100644
--- a/drivers/md/dm-ploop.h
+++ b/drivers/md/dm-ploop.h
@@ -311,13 +311,14 @@ static inline bool ploop_is_ro(struct ploop *ploop)
 	return (dm_table_get_mode(ploop->ti->table) & FMODE_WRITE) == 0;
 }
 
-static inline void remap_to_cluster(struct ploop *ploop, struct pio *pio, u32 clu)
+static inline void ploop_remap_to_cluster(struct ploop *ploop,
+					  struct pio *pio, u32 clu)
 {
 	pio->bi_iter.bi_sector &= ((1 << ploop->cluster_log) - 1);
 	pio->bi_iter.bi_sector |= (clu << ploop->cluster_log);
 }
 
-static inline bool whole_cluster(struct ploop *ploop, struct pio *pio)
+static inline bool ploop_whole_cluster(struct ploop *ploop, struct pio *pio)
 {
 	sector_t end_sector = bvec_iter_end_sector(pio->bi_iter);
 
@@ -332,14 +333,14 @@ static inline bool whole_cluster(struct ploop *ploop, struct pio *pio)
 
 #define BAT_LEVEL_MAX		(U8_MAX - 1)
 #define BAT_LEVEL_INVALID	U8_MAX
-static inline u8 top_level(struct ploop *ploop)
+static inline u8 ploop_top_level(struct ploop *ploop)
 {
 	return ploop->nr_deltas - 1;
 }
 
-static inline struct ploop_delta *top_delta(struct ploop *ploop)
+static inline struct ploop_delta *ploop_top_delta(struct ploop *ploop)
 {
-	return &ploop->deltas[top_level(ploop)];
+	return &ploop->deltas[ploop_top_level(ploop)];
 }
 
 static inline void ploop_hole_set_bit(unsigned long nr, struct ploop *ploop)
@@ -354,7 +355,7 @@ static inline void ploop_hole_clear_bit(u32 nr, struct ploop *ploop)
 		clear_bit(nr, ploop->holes_bitmap);
 }
 
-static inline unsigned int nr_pages_in_cluster(struct ploop *ploop)
+static inline unsigned int ploop_nr_pages_in_cluster(struct ploop *ploop)
 {
 	return 1 << (ploop->cluster_log + 9 - PAGE_SHIFT);
 }
@@ -371,7 +372,7 @@ static inline unsigned int ploop_nr_bat_clusters(struct ploop *ploop,
 	return bat_clusters;
 }
 
-static inline u32 bat_clu_to_page_nr(u32 clu)
+static inline u32 ploop_bat_clu_to_page_nr(u32 clu)
 {
 	u64 byte;
 
@@ -379,30 +380,30 @@ static inline u32 bat_clu_to_page_nr(u32 clu)
 	return byte >> PAGE_SHIFT;
 }
 
-static inline u32 bat_clu_idx_in_page(u32 clu)
+static inline u32 ploop_bat_clu_idx_in_page(u32 clu)
 {
 	return (clu + PLOOP_MAP_OFFSET) % (PAGE_SIZE / sizeof(map_index_t));
 }
 
-static inline u32 page_clu_idx_to_bat_clu(u32 page_id, u32 cluster_rel)
+static inline u32 ploop_page_clu_idx_to_bat_clu(u32 page_id, u32 cluster_rel)
 {
 	unsigned int off;
 	off = (u64)page_id * PAGE_SIZE / sizeof(map_index_t) - PLOOP_MAP_OFFSET;
 	return off + cluster_rel;
 }
 
-static inline struct md_page *md_first_entry(struct rb_root *md_root)
+static inline struct md_page *ploop_md_first_entry(struct rb_root *md_root)
 {
 	struct rb_node *node = rb_first(md_root);
 	return rb_entry(node, struct md_page, node);
 }
-static inline struct md_page *md_next_entry(struct md_page *md)
+static inline struct md_page *ploop_md_next_entry(struct md_page *md)
 {
 	return rb_entry(rb_next(&md->node), struct md_page, node);
 }
 
 
-extern struct md_page * md_page_find(struct ploop *ploop, u32 id);
+extern struct md_page *ploop_md_page_find(struct ploop *ploop, u32 id);
 
 /*
  * This should be called in very rare cases. Avoid this function
@@ -415,12 +416,12 @@ static inline u32 ploop_bat_entries(struct ploop *ploop, u32 clu,
 	u32 *bat_entries, dst_clu, id;
 	struct md_page *md;
 
-	id = bat_clu_to_page_nr(clu);
-	md = md_page_find(ploop, id);
+	id = ploop_bat_clu_to_page_nr(clu);
+	md = ploop_md_page_find(ploop, id);
 	BUG_ON(!md);
 
 	/* Cluster index related to the page[page_id] start */
-	clu = bat_clu_idx_in_page(clu);
+	clu = ploop_bat_clu_idx_in_page(clu);
 
 	if (bat_level)
 		*bat_level = md->bat_levels[clu];
@@ -433,7 +434,7 @@ static inline u32 ploop_bat_entries(struct ploop *ploop, u32 clu,
 	return dst_clu;
 }
 
-static inline bool cluster_is_in_top_delta(struct ploop *ploop, u32 clu)
+static inline bool ploop_cluster_is_in_top_delta(struct ploop *ploop, u32 clu)
 {
 	u32 dst_clu;
 	u8 level;
@@ -442,12 +443,12 @@ static inline bool cluster_is_in_top_delta(struct ploop *ploop, u32 clu)
 		return false;
 	dst_clu = ploop_bat_entries(ploop, clu, &level, NULL);
 
-	if (dst_clu == BAT_ENTRY_NONE || level < top_level(ploop))
+	if (dst_clu == BAT_ENTRY_NONE || level < ploop_top_level(ploop))
 		return false;
 	return true;
 }
 
-static inline bool md_page_cluster_is_in_top_delta(struct ploop *ploop,
+static inline bool ploop_md_page_cluster_is_in_top_delta(struct ploop *ploop,
 					   struct md_page *md, u32 clu)
 {
 	u32 count, *bat_entries;
@@ -462,7 +463,7 @@ static inline bool md_page_cluster_is_in_top_delta(struct ploop *ploop,
 
 	bat_entries = kmap_atomic(md->page);
 	if (bat_entries[clu] == BAT_ENTRY_NONE ||
-	    md->bat_levels[clu] < top_level(ploop))
+	    md->bat_levels[clu] < ploop_top_level(ploop))
 		ret = false;
 	kunmap_atomic(bat_entries);
 	return ret;
@@ -472,7 +473,7 @@ static inline void init_be_iter(u32 nr_be, u32 page_id,
 				u32 *start, u32 *end)
 {
 	unsigned int count = PAGE_SIZE / sizeof(map_index_t);
-	u32 rem, last_page = bat_clu_to_page_nr(nr_be - 1);
+	u32 rem, last_page = ploop_bat_clu_to_page_nr(nr_be - 1);
 
 	*start = 0;
 	if (page_id == 0)
@@ -493,17 +494,17 @@ static inline void ploop_init_be_iter(struct ploop *ploop, u32 page_id,
 	init_be_iter(ploop->nr_bat_entries, page_id, start, end);
 }
 
-extern struct pio *find_pio(struct hlist_head head[], u32 clu);
+extern struct pio *ploop_find_pio(struct hlist_head head[], u32 clu);
 
-extern int prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
+extern int ploop_prealloc_md_pages(struct rb_root *root, u32 nr_bat_entries,
 			     u32 new_nr_bat_entries);
 
-static inline struct pio *bio_to_endio_hook(struct bio *bio)
+static inline struct pio *ploop_bio_to_endio_hook(struct bio *bio)
 {
 	return dm_per_bio_data(bio, sizeof(struct pio));
 }
 
-static inline struct pio *pio_list_pop(struct list_head *pio_list)
+static inline struct pio *ploop_pio_list_pop(struct list_head *pio_list)
 {
 	struct pio *pio;
 
@@ -520,7 +521,7 @@ static inline struct hlist_head *ploop_htable_slot(struct hlist_head head[], u32
 	return &head[hash_32(clu, PLOOP_HASH_TABLE_BITS)];
 }
 
-static inline bool fake_merge_pio(struct pio *pio)
+static inline bool ploop_fake_merge_pio(struct pio *pio)
 {
 	if (pio->is_fake_merge) {
 		WARN_ON_ONCE(pio->bi_iter.bi_size ||
@@ -530,51 +531,59 @@ static inline bool fake_merge_pio(struct pio *pio)
 	return false;
 }
 
-static inline struct pio *alloc_pio(struct ploop *ploop, gfp_t flags)
+static inline struct pio *ploop_alloc_pio(struct ploop *ploop, gfp_t flags)
 {
 	return mempool_alloc(ploop->pio_pool, flags);
 }
 
-static inline void free_pio(struct ploop *ploop, struct pio *pio)
+static inline void ploop_free_pio(struct ploop *ploop, struct pio *pio)
 {
 	mempool_free(pio, ploop->pio_pool);
 }
 
-extern void md_page_insert(struct ploop *ploop, struct md_page *md);
+extern void ploop_md_page_insert(struct ploop *ploop, struct md_page *md);
 extern void ploop_free_md_page(struct md_page *md);
-extern void free_md_pages_tree(struct rb_root *root);
-extern bool try_update_bat_entry(struct ploop *ploop, u32 clu,
-				 u8 level, u32 dst_clu);
-
-extern int ploop_add_delta(struct ploop *ploop, u32 level, struct file *file, bool is_raw);
-extern int ploop_check_delta_length(struct ploop *ploop, struct file *file, loff_t *file_size);
-extern void submit_embedded_pios(struct ploop *ploop, struct list_head *list);
-extern void dispatch_pios(struct ploop *ploop, struct pio *pio, struct list_head *pio_list);
+extern void ploop_free_md_pages_tree(struct rb_root *root);
+extern bool ploop_try_update_bat_entry(struct ploop *ploop, u32 clu,
+				       u8 level, u32 dst_clu);
+
+extern int ploop_add_delta(struct ploop *ploop, u32 level,
+			   struct file *file, bool is_raw);
+extern int ploop_check_delta_length(struct ploop *ploop, struct file *file,
+				    loff_t *file_size);
+extern void ploop_submit_embedded_pios(struct ploop *ploop,
+				       struct list_head *list);
+extern void ploop_dispatch_pios(struct ploop *ploop, struct pio *pio,
+				struct list_head *pio_list);
 extern void do_ploop_work(struct work_struct *ws);
 extern void do_ploop_fsync_work(struct work_struct *ws);
-extern void ploop_event_work(struct work_struct *work);
+extern void do_ploop_event_work(struct work_struct *work);
 extern int ploop_clone_and_map(struct dm_target *ti, struct request *rq,
-		    union map_info *map_context, struct request **clone);
-extern struct pio *find_lk_of_cluster(struct ploop *ploop, u32 clu);
-extern void init_pio(struct ploop *ploop, unsigned int bi_op, struct pio *pio);
+			       union map_info *map_context,
+			       struct request **clone);
+extern struct pio *ploop_find_lk_of_cluster(struct ploop *ploop, u32 clu);
+extern void ploop_init_pio(struct ploop *ploop, unsigned int bi_op,
+			   struct pio *pio);
 extern int ploop_rw_page_sync(unsigned rw, struct file *file,
 			      u64 index, struct page *page);
-extern void map_and_submit_rw(struct ploop *ploop, u32 dst_clu, struct pio *pio, u8 level);
-
-extern int ploop_prepare_reloc_index_wb(struct ploop *, struct md_page **, u32, u32 *);
-extern void ploop_break_bat_update(struct ploop *ploop, struct md_page *);
+extern void ploop_map_and_submit_rw(struct ploop *ploop, u32 dst_clu,
+				    struct pio *pio, u8 level);
+extern int ploop_prepare_reloc_index_wb(struct ploop *ploop,
+					struct md_page **ret_md, u32 clu, u32 *dst_clu);
+extern void ploop_break_bat_update(struct ploop *ploop, struct md_page *md);
 extern void ploop_index_wb_submit(struct ploop *, struct ploop_index_wb *);
 extern int ploop_message(struct dm_target *ti, unsigned int argc, char **argv,
 			 char *result, unsigned int maxlen);
 
-extern struct pio * alloc_pio_with_pages(struct ploop *ploop);
-extern void free_pio_with_pages(struct ploop *ploop, struct pio *pio);
-extern void pio_prepare_offsets(struct ploop *, struct pio *, u32);
+extern struct pio *ploop_alloc_pio_with_pages(struct ploop *ploop);
+extern void ploop_free_pio_with_pages(struct ploop *ploop, struct pio *pio);
+extern void ploop_pio_prepare_offsets(struct ploop *ploop, struct pio *pio, u32 clu);
 
 extern int ploop_setup_metadata(struct ploop *ploop, struct page *page);
 extern int ploop_read_delta_metadata(struct ploop *ploop, struct file *file,
 				     struct rb_root *md_root, u32 *delta_nr_be);
-extern void ploop_index_wb_init(struct ploop_index_wb *piwb, struct ploop *ploop);
+extern void ploop_index_wb_init(struct ploop_index_wb *piwb,
+				struct ploop *ploop);
 extern void ploop_call_rw_iter(struct file *file, loff_t pos, unsigned rw,
 			       struct iov_iter *iter, struct pio *pio);
 extern void ploop_enospc_timer(struct timer_list *timer);


More information about the Devel mailing list