[Devel] [PATCH RHEL7 COMMIT] bc/vmalloc: zap ub_vmalloc

Konstantin Khorenko khorenko at virtuozzo.com
Tue May 26 07:52:18 PDT 2015


The commit is pushed to "branch-rh7-3.10.0-123.1.2-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-123.1.2.vz7.5.5
------>
commit a850b25e2410426bf4a98d0e5893553d20e17586
Author: Vladimir Davydov <vdavydov at parallels.com>
Date:   Tue May 26 18:52:18 2015 +0400

    bc/vmalloc: zap ub_vmalloc
    
    This is a leftover from RH6, where some vmalloc allocations were charged
    to UBC. This code is now obsolete - all vmalloc allocations should be
    charged to kmemcg instead (this is future work) - so this patch kills it.
    
    [It reverts vmalloc-related pieces of commit 1da9426dc5c49]
    
    Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 arch/x86/kernel/ldt.c          |  2 +-
 fs/file.c                      |  2 +-
 include/linux/vmalloc.h        |  4 ----
 ipc/util.c                     |  2 +-
 kernel/fairsched.c             |  2 +-
 mm/vmalloc.c                   | 42 ++++++------------------------------------
 net/ipv4/netfilter/ip_tables.c |  2 +-
 net/netfilter/x_tables.c       |  2 +-
 8 files changed, 12 insertions(+), 46 deletions(-)

diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0896329..b654ee4 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -42,7 +42,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
 			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
 	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
-		newldt = ub_vmalloc(mincount * LDT_ENTRY_SIZE);
+		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
 	else
 		newldt = (void *)__get_free_page(GFP_KERNEL);
 
diff --git a/fs/file.c b/fs/file.c
index 7f5e91e..7bbbb42 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -38,7 +38,7 @@ static void *alloc_fdmem(size_t size)
 		if (data != NULL)
 			return data;
 	}
-	return ub_vmalloc(size);
+	return vmalloc(size);
 }
 
 static void free_fdmem(void *ptr)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index a97f319..dd0a2c8 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -66,17 +66,13 @@ static inline void vmalloc_init(void)
 
 extern void *vmalloc(unsigned long size);
 extern void *vzalloc(unsigned long size);
-extern void *ub_vmalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *ub_vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *vmalloc_best(unsigned long size);
-extern void *ub_vmalloc_best(unsigned long size);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, int node, const void *caller);
diff --git a/ipc/util.c b/ipc/util.c
index 6539b0e..721a9e0 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -466,7 +466,7 @@ void *ipc_alloc(int size)
 {
 	void *out;
 	if(size > PAGE_SIZE)
-		out = ub_vmalloc(size);
+		out = vmalloc(size);
 	else
 		out = kmalloc(size, GFP_KERNEL);
 	return out;
diff --git a/kernel/fairsched.c b/kernel/fairsched.c
index 0d0fa5c..2fd39cd 100644
--- a/kernel/fairsched.c
+++ b/kernel/fairsched.c
@@ -456,7 +456,7 @@ static struct fairsched_dump *fairsched_do_dump(int compat)
 
 	nr_nodes = ve_is_super(get_exec_env()) ? nr_nodes + 16 : 1;
 
-	dump = ub_vmalloc(sizeof(*dump) + nr_nodes * sizeof(dump->nodes[0]));
+	dump = vmalloc(sizeof(*dump) + nr_nodes * sizeof(dump->nodes[0]));
 	if (dump == NULL)
 		goto out;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ac32dca..7fbc92a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -32,15 +32,13 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-#include <bc/debug.h>
-
 struct vfree_deferred {
 	struct llist_head list;
 	struct work_struct wq;
 };
 static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
 
-static void __vunmap(const void *, int, int);
+static void __vunmap(const void *, int);
 
 static void free_work(struct work_struct *w)
 {
@@ -49,7 +47,7 @@ static void free_work(struct work_struct *w)
 	while (llnode) {
 		void *p = llnode;
 		llnode = llist_next(llnode);
-		__vunmap(p, 1, 0);
+		__vunmap(p, 1);
 	}
 }
 
@@ -1471,7 +1469,7 @@ struct vm_struct *remove_vm_area(const void *addr)
 	return NULL;
 }
 
-static void __vunmap(const void *addr, int deallocate_pages, int uncharge)
+static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
 
@@ -1540,7 +1538,7 @@ void vfree(const void *addr)
 		llist_add((struct llist_node *)addr, &p->list);
 		schedule_work(&p->wq);
 	} else
-		__vunmap(addr, 1, 1);
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
 
@@ -1558,7 +1556,7 @@ void vunmap(const void *addr)
 	BUG_ON(in_interrupt());
 	might_sleep();
 	if (addr)
-		__vunmap(addr, 0, 0);
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
 
@@ -1646,14 +1644,13 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	if (map_vm_area(area, prot, &pages))
 		goto fail;
-
 	return area->addr;
 
 fail:
 	warn_alloc_failed(gfp_mask, order,
 			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
 			  (area->nr_pages*PAGE_SIZE), area->size);
-	__vunmap(area->addr, 1, 0);
+	vfree(area->addr);
 	return NULL;
 }
 
@@ -1767,26 +1764,6 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
-void *ub_vmalloc(unsigned long size)
-{
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
-}
-EXPORT_SYMBOL(ub_vmalloc);
-
-void *vmalloc_best(unsigned long size)
-{
-	return vmalloc(size);
-}
-
-EXPORT_SYMBOL(vmalloc_best);
-
-void *ub_vmalloc_best(unsigned long size)
-{
-	return ub_vmalloc(size);
-}
-
-EXPORT_SYMBOL(ub_vmalloc_best);
-
 /**
  *	vzalloc - allocate virtually contiguous memory with zero fill
  *	@size:	allocation size
@@ -1846,13 +1823,6 @@ void *vmalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vmalloc_node);
 
-void *ub_vmalloc_node(unsigned long size, int node)
-{
-	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
-					node, __builtin_return_address(0));
-}
-EXPORT_SYMBOL(ub_vmalloc_node);
-
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
  * @size:	allocation size
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 47fcedd..31eda61 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1348,7 +1348,7 @@ do_add_counters(struct net *net, const void __user *user,
 	if (len != size + num_counters * sizeof(struct xt_counters))
 		return -EINVAL;
 
-	paddc = ub_vmalloc(len - size);
+	paddc = vmalloc(len - size);
 	if (!paddc)
 		return -ENOMEM;
 
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 7c8cf12..919976f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -723,7 +723,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
 							GFP_KERNEL,
 							cpu_to_node(cpu));
 		else
-			newinfo->entries[cpu] = ub_vmalloc_node(size,
+			newinfo->entries[cpu] = vmalloc_node(size,
 							cpu_to_node(cpu));
 
 		if (newinfo->entries[cpu] == NULL) {



More information about the Devel mailing list