[Devel] [PATCH rh7 2/5] ub: get rid of dcache accounting related stuff

Vladimir Davydov vdavydov at parallels.com
Mon May 18 07:24:57 PDT 2015


dcache is now accounted as part of memcg:kmem, so remove the leftovers.

If we ever decide to account dcache separately again, we will re-implement
or port what we really need.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
---
 fs/namei.c               |    1 -
 include/bc/beancounter.h |    6 --
 include/bc/dcache.h      |   18 ----
 kernel/bc/beancounter.c  |    5 -
 kernel/bc/dcache.c       |  269 ----------------------------------------------
 kernel/bc/proc.c         |    3 -
 kernel/bc/vm_pages.c     |    5 +-
 kernel/ve/vecalls.c      |    2 -
 8 files changed, 2 insertions(+), 307 deletions(-)
 delete mode 100644 include/bc/dcache.h
 delete mode 100644 kernel/bc/dcache.c
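
(Not part of the patch -- an illustrative sketch for reviewers of where the
accounting happens now. Assuming the upstream 3.10-era memcg:kmem code that
rh7 carries, a dentry allocation is charged roughly as below; the hook is
simplified, and memcg_kmem_get_cache() / slab_pre_alloc_hook() are upstream
names, not anything introduced by this series.)

/* fs/dcache.c: __d_alloc() is unchanged -- the dentry (and a long external
 * name via kmalloc()) simply goes through the slab layer */
dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);

/* mm/slub.c (mm/slab.c is analogous): the pre-alloc hook redirects the
 * allocation to the per-memcg clone of dentry_cache, so new slab pages are
 * charged to the memcg's kmem counter instead of UB_DCACHESIZE */
static inline struct kmem_cache *
slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
	flags &= gfp_allowed_mask;
	might_sleep_if(flags & __GFP_WAIT);
	/* fault-injection check omitted */
	return memcg_kmem_get_cache(s, flags);
}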

diff --git a/fs/namei.c b/fs/namei.c
index 5b0146255e94..b62c93df99d1 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -142,7 +142,6 @@ getname_flags(const char __user *filename, int flags, int *empty)
 	if (result)
 		return result;
 
-	/*ub_dentry_checkup();*/
 	result = __getname();
 	if (unlikely(!result))
 		return ERR_PTR(-ENOMEM);
diff --git a/include/bc/beancounter.h b/include/bc/beancounter.h
index 31671ff459da..4337e1363eeb 100644
--- a/include/bc/beancounter.h
+++ b/include/bc/beancounter.h
@@ -149,12 +149,6 @@ struct user_beancounter {
 
 	void			*private_data2;
 
-	struct list_head	ub_dentry_lru;
-	struct list_head	ub_dentry_top;
-	int			ub_dentry_unused;
-	int			ub_dentry_batch;
-	unsigned long		ub_dentry_pruned;
-
 	/* resources statistic and settings */
 	struct ubparm		ub_parms[UB_RESOURCES];
 	/* resources statistic for last interval */
diff --git a/include/bc/dcache.h b/include/bc/dcache.h
deleted file mode 100644
index 186e0fc895d5..000000000000
--- a/include/bc/dcache.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef __UB_DCACHE_H__
-#define __UB_DCACHE_H__
-
-#include <bc/decl.h>
-
-extern unsigned int ub_dcache_threshold;
-
-UB_DECLARE_FUNC(int, ub_dcache_charge(struct user_beancounter *ub, int name_len))
-UB_DECLARE_VOID_FUNC(ub_dcache_uncharge(struct user_beancounter *ub, int name_len))
-UB_DECLARE_VOID_FUNC(ub_dcache_set_owner(struct dentry *d, struct user_beancounter *ub))
-UB_DECLARE_VOID_FUNC(ub_dcache_change_owner(struct dentry *dentry, struct user_beancounter *ub))
-UB_DECLARE_VOID_FUNC(ub_dcache_clear_owner(struct dentry *dentry))
-UB_DECLARE_VOID_FUNC(ub_dcache_unuse(struct user_beancounter *ub))
-UB_DECLARE_VOID_FUNC(ub_dcache_reclaim(struct user_beancounter *ub, unsigned long numerator, unsigned long denominator))
-UB_DECLARE_FUNC(int, ub_dcache_shrink(struct user_beancounter *ub, unsigned long size, gfp_t gfp_mask))
-UB_DECLARE_FUNC(unsigned long, ub_dcache_get_size(struct dentry *dentry))
-
-#endif
diff --git a/kernel/bc/beancounter.c b/kernel/bc/beancounter.c
index cdbe846bf839..5cc0688131ae 100644
--- a/kernel/bc/beancounter.c
+++ b/kernel/bc/beancounter.c
@@ -42,7 +42,6 @@
 #include <bc/beancounter.h>
 #include <bc/io_acct.h>
 #include <bc/vmpages.h>
-#include <bc/dcache.h>
 #include <bc/proc.h>
 
 static struct kmem_cache *ub_cachep;
@@ -465,8 +464,6 @@ static inline int bc_verify_held(struct user_beancounter *ub)
 
 	clean &= verify_res(ub, "pincount", __ub_percpu_sum(ub, pincount));
 
-	clean &= verify_res(ub, "dcache", !list_empty(&ub->ub_dentry_lru));
-
 	ub_debug_trace(!clean, 5, 60*HZ);
 
 	return clean;
@@ -958,8 +955,6 @@ static void init_beancounter_struct(struct user_beancounter *ub)
 	spin_lock_init(&ub->ub_lock);
 	INIT_LIST_HEAD(&ub->ub_tcp_sk_list);
 	INIT_LIST_HEAD(&ub->ub_other_sk_list);
-	INIT_LIST_HEAD(&ub->ub_dentry_lru);
-	INIT_LIST_HEAD(&ub->ub_dentry_top);
 	init_oom_control(&ub->oom_ctrl);
 	spin_lock_init(&ub->rl_lock);
 	ub->rl_wall.tv64 = LLONG_MIN;
diff --git a/kernel/bc/dcache.c b/kernel/bc/dcache.c
deleted file mode 100644
index 2727e690fbb4..000000000000
--- a/kernel/bc/dcache.c
+++ /dev/null
@@ -1,269 +0,0 @@
-#include <linux/slab.h>
-#include <linux/dcache.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-
-#include <bc/beancounter.h>
-#include <bc/vmpages.h>
-#include <bc/dcache.h>
-#include <bc/kmem.h>
-
-static unsigned int dcache_charge_size(int name_len)
-{
-	return dentry_cache->objuse + kmem_cache_objuse(inode_cachep) +
-		(name_len > DNAME_INLINE_LEN ? name_len : 0);
-}
-
-int ub_dcache_shrink(struct user_beancounter *ub,
-		unsigned long size, gfp_t gfp_mask)
-{
-	int count, pruned;
-
-	if (!(gfp_mask & __GFP_FS))
-		return -EBUSY;
-
-	count = DIV_ROUND_UP(size, dcache_charge_size(0));
-	spin_lock(&dcache_lock);
-	pruned = __shrink_dcache_ub(ub, count);
-	spin_unlock(&dcache_lock);
-	if (!pruned)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static int __ub_dcache_charge(struct user_beancounter *ub,
-		unsigned long size, gfp_t gfp_mask, int strict)
-{
-	return charge_beancounter_fast(ub, UB_DCACHESIZE, size, strict);
-}
-
-static void __ub_dcache_uncharge(struct user_beancounter *ub,
-		unsigned long size)
-{
-	uncharge_beancounter_fast(ub, UB_DCACHESIZE, size);
-}
-
-int ub_dcache_charge(struct user_beancounter *ub, int name_len)
-{
-	int size, shrink;
-
-	size = dcache_charge_size(name_len);
-	do {
-		if (!__ub_dcache_charge(ub, size,
-					GFP_KERNEL|__GFP_NOWARN,
-					UB_SOFT | UB_TEST))
-			return 0;
-
-		shrink = max(size, ub->ub_parms[UB_DCACHESIZE].max_precharge);
-	} while (!ub_dcache_shrink(ub, shrink, GFP_KERNEL));
-
-	spin_lock_irq(&ub->ub_lock);
-	ub->ub_parms[UB_DCACHESIZE].failcnt++;
-	spin_unlock_irq(&ub->ub_lock);
-
-	return -ENOMEM;
-}
-
-void ub_dcache_uncharge(struct user_beancounter *ub, int name_len)
-{
-	unsigned int size;
-
-	size = dcache_charge_size(name_len);
-	__ub_dcache_uncharge(ub, size);
-}
-
-static unsigned long recharge_subtree(struct dentry *d, struct user_beancounter *ub,
-		struct user_beancounter *cub)
-{
-	struct dentry *orig_root;
-	unsigned long size = 0;
-
-	orig_root = d;
-
-	while (1) {
-		if (d->d_ub != cub) {
-			if (!(d->d_flags & DCACHE_BCTOP)) {
-				printk("%s %s %s %s %s %p %p %p %p\n", __func__,
-						d->d_name.name,
-						d->d_ub->ub_name,
-						ub->ub_name,
-						cub->ub_name,
-						d, d->d_ub, ub, cub);
-				WARN_ON(1);
-			}
-			goto skip_subtree;
-		} else if (d->d_ub == ub)
-			goto skip_recharge;
-
-		if (!list_empty(&d->d_lru)) {
-			list_move(&d->d_bclru, &ub->ub_dentry_lru);
-			cub->ub_dentry_unused--;
-			ub->ub_dentry_unused++;
-		}
-
-		d->d_ub = ub;
-skip_recharge:
-		size += dcache_charge_size(d->d_name.len);
-
-		if (!list_empty(&d->d_subdirs)) {
-			d = list_entry(d->d_subdirs.next,
-					struct dentry, d_u.d_child);
-			continue;
-		}
-skip_subtree:
-		if (d == orig_root)
-			break;
-		while (d == list_entry(d->d_parent->d_subdirs.prev,
-					struct dentry, d_u.d_child)) {
-			d = d->d_parent;
-			if (d == orig_root)
-				goto out;
-		}
-		d = list_entry(d->d_u.d_child.next,
-				struct dentry, d_u.d_child);
-	}
-out:
-	return size;
-}
-
-unsigned long ub_dcache_get_size(struct dentry *dentry)
-{
-	unsigned long size;
-
-	spin_lock(&dcache_lock);
-	size = recharge_subtree(dentry, dentry->d_ub, dentry->d_ub);
-	spin_unlock(&dcache_lock);
-
-	return size;
-}
-
-void ub_dcache_set_owner(struct dentry *root, struct user_beancounter *ub)
-{
-	struct user_beancounter *cub;
-	unsigned long size;
-
-	spin_lock(&dcache_lock);
-
-	cub = root->d_ub;
-	if (ub != cub) {
-		size = recharge_subtree(root, ub, cub);
-		__ub_dcache_uncharge(cub, size);
-		__ub_dcache_charge(ub, size, GFP_ATOMIC | __GFP_NOFAIL, UB_FORCE);
-	}
-
-	if (root->d_flags & DCACHE_BCTOP) {
-		list_del(&root->d_bclru);
-	} else {
-		spin_lock(&root->d_lock);
-		root->d_flags |= DCACHE_BCTOP;
-		spin_unlock(&root->d_lock);
-	}
-
-	if (!list_empty(&root->d_lru)) {
-		list_del_init(&root->d_lru);
-		list_del(&root->d_bclru);
-		root->d_sb->s_nr_dentry_unused--;
-		cub->ub_dentry_unused--;
-		dentry_stat.nr_unused--;
-	}
-
-	list_add_tail(&root->d_bclru, &ub->ub_dentry_top);
-
-	spin_unlock(&dcache_lock);
-}
-EXPORT_SYMBOL(ub_dcache_set_owner);
-
-void ub_dcache_change_owner(struct dentry *dentry, struct user_beancounter *ub)
-{
-	struct user_beancounter *cub = dentry->d_ub;
-	long size;
-
-	size = recharge_subtree(dentry, ub, cub);
-	__ub_dcache_uncharge(cub, size);
-	__ub_dcache_charge(ub, size, GFP_ATOMIC | __GFP_NOFAIL, UB_FORCE);
-}
-
-#define UB_DCACHE_BATCH 32
-
-void ub_dcache_reclaim(struct user_beancounter *ub,
-		unsigned long numerator, unsigned long denominator)
-{
-	unsigned long flags, batch;
-
-	if (ub->ub_dentry_unused <= ub_dcache_threshold)
-		return;
-
-	spin_lock_irqsave(&ub->ub_lock, flags);
-	batch = ub->ub_dentry_unused * numerator / denominator;
-	batch = ub->ub_dentry_batch = batch + ub->ub_dentry_batch;
-	if (batch < UB_DCACHE_BATCH)
-		batch = 0;
-	else
-		ub->ub_dentry_batch = 0;
-	spin_unlock_irqrestore(&ub->ub_lock, flags);
-
-	if (batch) {
-		spin_lock(&dcache_lock);
-		__shrink_dcache_ub(ub, batch);
-		spin_unlock(&dcache_lock);
-	}
-}
-
-/* under dcache_lock and dentry->d_lock */
-void ub_dcache_clear_owner(struct dentry *dentry)
-{
-	struct user_beancounter *ub, *cub;
-	long size;
-
-	BUG_ON(!list_empty(&dentry->d_subdirs));
-	BUG_ON(!(dentry->d_flags & DCACHE_BCTOP));
-
-	cub = dentry->d_ub;
-	ub = IS_ROOT(dentry) ? get_ub0() : dentry->d_parent->d_ub;
-	dentry->d_ub = ub;
-
-	size = dcache_charge_size(dentry->d_name.len);
-	__ub_dcache_uncharge(cub, size);
-	__ub_dcache_charge(ub, size, GFP_ATOMIC|__GFP_NOFAIL, UB_FORCE);
-
-	dentry->d_flags &= ~DCACHE_BCTOP;
-
-	list_del(&dentry->d_bclru);
-}
-
-void ub_dcache_unuse(struct user_beancounter *cub)
-{
-	struct dentry *dentry, *tmp;
-	struct user_beancounter *ub;
-	long size;
-
-	spin_lock(&dcache_lock);
-	list_for_each_entry_safe(dentry, tmp, &cub->ub_dentry_top, d_bclru) {
-		/* umount in progress */
-		if (!atomic_read(&dentry->d_sb->s_active))
-			continue;
-
-		BUG_ON(dentry->d_ub != cub);
-		ub = IS_ROOT(dentry) ? get_ub0() : dentry->d_parent->d_ub;
-
-		size = recharge_subtree(dentry, ub, cub);
-		__ub_dcache_uncharge(cub, size);
-		__ub_dcache_charge(ub, size, GFP_ATOMIC|__GFP_NOFAIL, UB_FORCE);
-
-		spin_lock(&dentry->d_lock);
-		BUG_ON(!(dentry->d_flags & DCACHE_BCTOP));
-		dentry->d_flags &= ~DCACHE_BCTOP;
-		spin_unlock(&dentry->d_lock);
-
-		list_del(&dentry->d_bclru);
-	}
-	spin_unlock(&dcache_lock);
-
-	/* wait for concurrent umounts */
-	while (!list_empty(&cub->ub_dentry_top))
-		schedule_timeout_uninterruptible(1);
-
-	BUG_ON(!list_empty(&cub->ub_dentry_lru));
-}
diff --git a/kernel/bc/proc.c b/kernel/bc/proc.c
index af6a610a3e08..3d5bf1cc78df 100644
--- a/kernel/bc/proc.c
+++ b/kernel/bc/proc.c
@@ -20,7 +20,6 @@
 
 #include <bc/beancounter.h>
 #include <bc/proc.h>
-#include <bc/dcache.h>
 
 /* Generic output formats */
 #if BITS_PER_LONG == 32
@@ -122,8 +121,6 @@ static int bc_debug_show(struct seq_file *f, void *v)
 	seq_printf(f, "sizeof: %lu\n", sizeof(struct user_beancounter));
 	seq_printf(f, "pincount: %d\n", __ub_percpu_sum(ub, pincount));
 
-	seq_printf(f, "dcache_pruned: %lu\n", ub->ub_dentry_pruned);
-
 	seq_printf(f, "oom_score_adj: %s\n", (ub->ub_flags &
 				UB_OOM_MANUAL_SCORE_ADJ) ? "manual" : "auto");
 
diff --git a/kernel/bc/vm_pages.c b/kernel/bc/vm_pages.c
index 17d5aee72dc3..7f5eece57aa7 100644
--- a/kernel/bc/vm_pages.c
+++ b/kernel/bc/vm_pages.c
@@ -348,10 +348,9 @@ void __show_ub_mem(struct user_beancounter *ub)
 	__show_one_resource("DCSZ", ub->ub_parms + UB_DCACHESIZE);
 	__show_one_resource("OOMG", ub->ub_parms + UB_OOMGUARPAGES);
 
-	printk("Dirty %lu Wback %lu Dche %u Prnd %lu\n",
+	printk("Dirty %lu Wback %lu\n",
 			ub_stat_get(ub, dirty_pages),
-			ub_stat_get(ub, writeback_pages),
-			ub->ub_dentry_unused, ub->ub_dentry_pruned);
+			ub_stat_get(ub, writeback_pages));
 }
 
 void show_ub_mem(struct user_beancounter *ub)
diff --git a/kernel/ve/vecalls.c b/kernel/ve/vecalls.c
index 7c574b344154..c60fd02e0087 100644
--- a/kernel/ve/vecalls.c
+++ b/kernel/ve/vecalls.c
@@ -75,8 +75,6 @@
 #include <linux/virtinfo.h>
 #include <linux/major.h>
 
-#include <bc/dcache.h>
-
 static struct cgroup *devices_root;
 
 static int	do_env_enter(struct ve_struct *ve, unsigned int flags);
-- 
1.7.10.4



