[Devel] [PATCH rh7] writeback: revert ub dirty limit related stuff

Vladimir Davydov vdavydov at parallels.com
Mon Aug 31 02:24:04 PDT 2015


This patch reverts the ub dirty limit related hunks introduced by the
initial commit 2a8b5de95918. None of them actually works, so this patch
introduces no functional changes. Dirty set control will be
reimplemented in the scope of PSBM-33841.

Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 fs/fs-writeback.c         | 39 +++++++--------------------------------
 include/linux/writeback.h |  4 ----
 mm/page-writeback.c       |  4 ----
 3 files changed, 7 insertions(+), 40 deletions(-)

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 66586a4f32de..ac8066b38214 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -40,7 +40,6 @@
 struct wb_writeback_work {
 	long nr_pages;
 	struct super_block *sb;
-	struct user_beancounter *ub;
 	unsigned long *older_than_this;
 	enum writeback_sync_modes sync_mode;
 	unsigned int tagged_writepages:1;
@@ -130,8 +129,8 @@ out_unlock:
 }
 
 static void
-__bdi_start_writeback(struct backing_dev_info *bdi,
-		      long nr_pages, bool range_cyclic, enum wb_reason reason)
+__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -150,7 +149,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi,
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
 	work->reason	= reason;
-	work->ub	= NULL;
 
 	bdi_queue_work(bdi, work);
 }
@@ -673,7 +671,6 @@ static long writeback_sb_inodes(struct super_block *sb,
 		.range_cyclic		= work->range_cyclic,
 		.range_start		= 0,
 		.range_end		= LLONG_MAX,
-		.wb_ub			= work->ub,
 	};
 	unsigned long start_time = jiffies;
 	long write_chunk;
@@ -707,14 +704,6 @@ static long writeback_sb_inodes(struct super_block *sb,
 		 * kind writeout is handled by the freer.
 		 */
 		spin_lock(&inode->i_lock);
-		if (wbc.wb_ub && !wb->bdi->dirty_exceeded &&
-		    (inode->i_mapping->dirtied_ub != wbc.wb_ub) &&
-		    (inode->i_state & I_DIRTY) == I_DIRTY_PAGES &&
-		    ub_should_skip_writeback(wbc.wb_ub, inode)) {
-			requeue_io(inode, wb);
-			continue;
-		}
-
 		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
 			spin_unlock(&inode->i_lock);
 			redirty_tail(inode, wb);
@@ -913,12 +902,9 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		/*
 		 * For background writeout, stop when we are below the
-		 * background dirty threshold. For filtered background
-		 * writeback we write all inodes dirtied before us,
-		 * because we cannot dereference this ub pointer.
+		 * background dirty threshold
 		 */
-		if (work->for_background && !work->ub &&
-		    !over_bground_thresh(wb->bdi))
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;
 
 		/*
@@ -1371,7 +1357,7 @@ out_unlock_inode:
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
-static void wait_sb_inodes(struct super_block *sb, struct user_beancounter *ub)
+static void wait_sb_inodes(struct super_block *sb)
 {
 	struct inode *inode, *old_inode = NULL;
 
@@ -1399,11 +1385,6 @@ static void wait_sb_inodes(struct super_block *sb, struct user_beancounter *ub)
 			spin_unlock(&inode->i_lock);
 			continue;
 		}
-		if (ub && (mapping->dirtied_ub != ub) &&
-		    (inode->i_state & I_DIRTY) == I_DIRTY_PAGES) {
-			spin_unlock(&inode->i_lock);
-			continue;
-		}
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
 		spin_unlock(&inode_sb_list_lock);
@@ -1522,12 +1503,11 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
  * This function writes and waits on any dirty inode belonging to this
  * super_block.
  */
-void sync_inodes_sb_ub(struct super_block *sb, struct user_beancounter *ub)
+void sync_inodes_sb(struct super_block *sb)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
 		.sb		= sb,
-		.ub		= ub,
 		.sync_mode	= WB_SYNC_ALL,
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
@@ -1544,12 +1524,7 @@ void sync_inodes_sb_ub(struct super_block *sb, struct user_beancounter *ub)
 	bdi_queue_work(sb->s_bdi, &work);
 	wait_for_completion(&done);
 
-	wait_sb_inodes(sb, ub);
-}
-
-void sync_inodes_sb(struct super_block *sb)
-{
-	sync_inodes_sb_ub(sb, NULL);
+	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 13c28729187c..a193a7eafb34 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -85,15 +85,12 @@ struct writeback_control {
 	/* reserved for Red Hat */
 	RH_KABI_RESERVE(1)
 	RH_KABI_RESERVE(2)
-
-	struct user_beancounter *wb_ub;	/* only for this beancounter */
 };
 
 /*
  * fs/fs-writeback.c
  */	
 struct bdi_writeback;
-struct user_beancounter;
 int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
@@ -102,7 +99,6 @@ int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 				  enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-void sync_inodes_sb_ub(struct super_block *sb, struct user_beancounter *);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 void inode_wait_for_writeback(struct inode *inode);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0cb8437f590e..2bb28ba0ff98 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1590,10 +1590,6 @@ pause:
 	virtinfo_notifier_call(VITYPE_IO, VIRTINFO_IO_BALANCE_DIRTY,
 			       (void*)pages_dirtied);
 
-	/*
-	 * Even if this is filtered writeback for other ub it will write
-	 * inodes for this ub, because ub->dirty_exceeded is set.
-	 */
 	if (writeback_in_progress(bdi))
 		return;
 
-- 
2.1.4



