[Devel] [PATCH rh7] vmalloc: zap ub_vmalloc

Vladimir Davydov vdavydov at parallels.com
Thu May 21 09:21:31 PDT 2015


ub_vmalloc is a leftover from RH6, where some vmalloc allocations were
charged to UBC. The code is now obsolete: all vmalloc allocations
should be charged to kmemcg instead (that is future work), so this
patch kills it.

[This reverts the vmalloc-related pieces of commit 1da9426dc5c49.]
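
As the removed hunks below show, ub_vmalloc() in this tree no longer
does any UBC charging; it is just a plain wrapper (with vmalloc_best()
and ub_vmalloc_best() as aliases on top of it):

	void *ub_vmalloc(unsigned long size)
	{
		return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
	}

which is exactly what vmalloc(size) expands to, so switching the
callers over to vmalloc()/vmalloc_node() is a functional no-op.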

Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 arch/x86/kernel/ldt.c          |    2 +-
 fs/file.c                      |    2 +-
 include/linux/vmalloc.h        |    4 ----
 ipc/util.c                     |    2 +-
 kernel/fairsched.c             |    2 +-
 mm/vmalloc.c                   |   42 ++++++----------------------------------
 net/ipv4/netfilter/ip_tables.c |    2 +-
 net/netfilter/x_tables.c       |    2 +-
 8 files changed, 12 insertions(+), 46 deletions(-)

diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0896329b2e49..b654ee4ffbed 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -42,7 +42,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
 			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
 	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = ub_vmalloc(mincount * LDT_ENTRY_SIZE);
+		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
 	else
 		newldt = (void *)__get_free_page(GFP_KERNEL);
 
diff --git a/fs/file.c b/fs/file.c
index 7f5e91e4ad3e..7bbbb42098d6 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -38,7 +38,7 @@ static void *alloc_fdmem(size_t size)
 		if (data != NULL)
 			return data;
 	}
-	return ub_vmalloc(size);
+	return vmalloc(size);
 }
 
 static void free_fdmem(void *ptr)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index a97f31919a40..dd0a2c810529 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -66,17 +66,13 @@ static inline void vmalloc_init(void)
 
 extern void *vmalloc(unsigned long size);
 extern void *vzalloc(unsigned long size);
-extern void *ub_vmalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *ub_vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *vmalloc_best(unsigned long size);
-extern void *ub_vmalloc_best(unsigned long size);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, int node, const void *caller);
diff --git a/ipc/util.c b/ipc/util.c
index 6539b0e10c8d..721a9e0a3b38 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -466,7 +466,7 @@ void *ipc_alloc(int size)
 {
 	void *out;
 	if(size > PAGE_SIZE)
-		out = ub_vmalloc(size);
+		out = vmalloc(size);
 	else
 		out = kmalloc(size, GFP_KERNEL);
 	return out;
diff --git a/kernel/fairsched.c b/kernel/fairsched.c
index 0d0fa5c0a597..2fd39cda14bb 100644
--- a/kernel/fairsched.c
+++ b/kernel/fairsched.c
@@ -456,7 +456,7 @@ static struct fairsched_dump *fairsched_do_dump(int compat)
 
 	nr_nodes = ve_is_super(get_exec_env()) ? nr_nodes + 16 : 1;
 
-	dump = ub_vmalloc(sizeof(*dump) + nr_nodes * sizeof(dump->nodes[0]));
+	dump = vmalloc(sizeof(*dump) + nr_nodes * sizeof(dump->nodes[0]));
 	if (dump == NULL)
 		goto out;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ac32dca89d4f..7fbc92aa03bc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -32,15 +32,13 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-#include <bc/debug.h>
-
 struct vfree_deferred {
 	struct llist_head list;
 	struct work_struct wq;
 };
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
 
-static void __vunmap(const void *, int, int);
+static void __vunmap(const void *, int);
 
 static void free_work(struct work_struct *w)
 {
@@ -49,7 +47,7 @@ static void free_work(struct work_struct *w)
 	while (llnode) {
 		void *p = llnode;
 		llnode = llist_next(llnode);
-		__vunmap(p, 1, 0);
+		__vunmap(p, 1);
 	}
 }
 
@@ -1471,7 +1469,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	return NULL;
 }
 
-static void __vunmap(const void *addr, int deallocate_pages, int uncharge)
+static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -1540,7 +1538,7 @@ void vfree(const void *addr)
 		llist_add((struct llist_node *)addr, &p->list);
 		schedule_work(&p->wq);
 	} else
-		__vunmap(addr, 1, 1);
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
 
@@ -1558,7 +1556,7 @@ void vunmap(const void *addr)
 	BUG_ON(in_interrupt());
 	might_sleep();
 	if (addr)
-		__vunmap(addr, 0, 0);
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
 
@@ -1646,14 +1644,13 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	if (map_vm_area(area, prot, &pages))
 		goto fail;
-
 	return area->addr;
 
 fail:
 	warn_alloc_failed(gfp_mask, order,
 			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
 			  (area->nr_pages*PAGE_SIZE), area->size);
-	__vunmap(area->addr, 1, 0);
+	vfree(area->addr);
 	return NULL;
 }
 
 
@@ -1767,26 +1764,6 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
-void *ub_vmalloc(unsigned long size)
-{
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
-}
-EXPORT_SYMBOL(ub_vmalloc);
-
-void *vmalloc_best(unsigned long size)
-{
-	return vmalloc(size);
-}
-
-EXPORT_SYMBOL(vmalloc_best);
-
-void *ub_vmalloc_best(unsigned long size)
-{
-	return ub_vmalloc(size);
-}
-
-EXPORT_SYMBOL(ub_vmalloc_best);
-
 /**
  *	vzalloc - allocate virtually contiguous memory with zero fill
  *	@size:	allocation size
@@ -1846,13 +1823,6 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
-void *ub_vmalloc_node(unsigned long size, int node)
-{
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
-					node, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ub_vmalloc_node);
-
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
  * @size:	allocation size
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 47fceddccf78..31eda610bd6e 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1348,7 +1348,7 @@ do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = ub_vmalloc(len - size);
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 7c8cf12c5fe8..919976f89644 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -723,7 +723,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 							GFP_KERNEL,
 							cpu_to_node(cpu));
 		else
-			newinfo->entries[cpu] = ub_vmalloc_node(size,
+			newinfo->entries[cpu] = vmalloc_node(size,
 							cpu_to_node(cpu));
 
 		if (newinfo->entries[cpu] == NULL) {
-- 
1.7.10.4