[Devel] [PATCH rh7 18/19] ms/netfilter: conntrack: use single slab cache
Konstantin Khorenko
khorenko at virtuozzo.com
Fri May 22 11:10:55 MSK 2020
From: Florian Westphal <fw at strlen.de>
An earlier patch changed the lookup side to also compare namespaces with
net_eq() after obtaining a reference on the conntrack, so a single
kmem_cache can be used.
Signed-off-by: Florian Westphal <fw at strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo at netfilter.org>
https://jira.sw.ru/browse/PSBM-103515
(cherry picked from commit 0c5366b3a8c77fd6d67b763c5a76dfdc314e7726)
Signed-off-by: Konstantin Khorenko <khorenko at virtuozzo.com>
---
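The lookup-side pattern the commit message refers to, roughly: because the
shared cache is SLAB_DESTROY_BY_RCU, a freed entry may be recycled (possibly
into another netns) while an RCU reader still holds a pointer to it, so the
reader must pin the entry first and only then re-check the tuple and the
namespace. A minimal sketch of that pattern, modelled on the upstream
__nf_conntrack_find_get() code; helper names and exact signatures may differ
in the rh7 tree:

    #include <net/net_namespace.h>
    #include <net/netfilter/nf_conntrack.h>
    #include <net/netfilter/nf_conntrack_core.h>

    /*
     * Sketch: revalidating lookup that makes one SLAB_DESTROY_BY_RCU cache
     * safe to share across all network namespaces.  Caller is assumed to
     * hold rcu_read_lock().  Zone handling is omitted for brevity.
     */
    static struct nf_conn *
    lookup_pinned(struct net *net, const struct nf_conntrack_tuple *tuple,
                  u32 hash)
    {
            struct nf_conntrack_tuple_hash *h;
            struct nf_conn *ct;

    begin:
            h = ____nf_conntrack_find(net, tuple, hash); /* signature approximate */
            if (!h)
                    return NULL;

            ct = nf_ct_tuplehash_to_ctrack(h);

            /* Take the reference before trusting any field of the entry. */
            if (unlikely(nf_ct_is_dying(ct) ||
                         !atomic_inc_not_zero(&ct->ct_general.use)))
                    return NULL;

            /*
             * The entry may have been freed and recycled while we only held
             * the RCU read lock: re-check the tuple *and* the owning netns
             * (the net_eq() check this series relies on) and retry on mismatch.
             */
            if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
                         !net_eq(net, nf_ct_net(ct)))) {
                    nf_ct_put(ct);
                    goto begin;
            }
            return ct;
    }
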
include/net/netns/conntrack.h | 2 --
net/netfilter/nf_conntrack_core.c | 38 ++++++++++++++------------------------
2 files changed, 14 insertions(+), 26 deletions(-)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 7f921295d020e..9bb9edef8611a 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -120,7 +120,6 @@ struct netns_ct {
struct ctl_table_header *event_sysctl_header;
struct ctl_table_header *helper_sysctl_header;
#endif
- char *slabname;
unsigned int sysctl_log_invalid; /* Log invalid packets */
unsigned int sysctl_events_retry_timeout;
int sysctl_events;
@@ -131,7 +130,6 @@ struct netns_ct {
int sysctl_checksum;
RH_KABI_DEPRECATE(seqcount_t, generation)
- struct kmem_cache *nf_conntrack_cachep;
/* next three hlist heads are unused in RHEL,
* only kept for ABI compatibility. */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index aca113963fed9..1c8223e006b18 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -78,6 +78,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);
+static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly seqcount_t nf_conntrack_generation;
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
@@ -1003,7 +1004,7 @@ __nf_conntrack_alloc(struct net *net,
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_DESTROY_BY_RCU.
*/
- ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
+ ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
if (ct == NULL)
goto out;
@@ -1033,7 +1034,7 @@ __nf_conntrack_alloc(struct net *net,
atomic_set(&ct->ct_general.use, 0);
return ct;
out_free:
- kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
out:
atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
@@ -1060,7 +1061,7 @@ void nf_conntrack_free(struct nf_conn *ct)
nf_ct_ext_destroy(ct);
nf_ct_ext_free(ct);
- kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+ kmem_cache_free(nf_conntrack_cachep, ct);
smp_mb__before_atomic_dec();
atomic_dec(&net->ct.count);
}
@@ -1705,8 +1706,6 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
nf_conntrack_tstamp_pernet_fini(net);
nf_conntrack_acct_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
- kfree(net->ct.slabname);
free_percpu(net->ct.stat);
free_percpu(net->ct.pcpu_lists);
}
@@ -1804,7 +1803,8 @@ module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
int nf_conntrack_init_start(void)
{
int max_factor = 8;
- int i, ret;
+ int ret = -ENOMEM;
+ int i;
seqcount_init(&nf_conntrack_generation);
@@ -1840,6 +1840,12 @@ int nf_conntrack_init_start(void)
init_net.ct.max = max_factor * nf_conntrack_htable_size;
+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+ sizeof(struct nf_conn), 0,
+ SLAB_DESTROY_BY_RCU, NULL);
+ if (!nf_conntrack_cachep)
+ goto err_cachep;
+
printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
init_net.ct.max);
@@ -1908,6 +1914,8 @@ int nf_conntrack_init_start(void)
err_acct:
nf_conntrack_expect_fini();
err_expect:
+ kmem_cache_destroy(nf_conntrack_cachep);
+err_cachep:
nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
return ret;
}
@@ -1928,7 +1936,6 @@ void nf_conntrack_init_end(void)
int nf_conntrack_init_net(struct net *net)
{
- static atomic64_t unique_id;
int ret = -ENOMEM;
int cpu;
@@ -1952,19 +1959,6 @@ int nf_conntrack_init_net(struct net *net)
if (!net->ct.stat)
goto err_pcpu_lists;
- net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
- (u64)atomic64_inc_return(&unique_id));
- if (!net->ct.slabname)
- goto err_slabname;
-
- net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
- sizeof(struct nf_conn), 0,
- SLAB_DESTROY_BY_RCU, NULL);
- if (!net->ct.nf_conntrack_cachep) {
- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
- goto err_cache;
- }
-
ret = nf_conntrack_expect_pernet_init(net);
if (ret < 0)
goto err_expect;
@@ -1996,10 +1990,6 @@ int nf_conntrack_init_net(struct net *net)
err_acct:
nf_conntrack_expect_pernet_fini(net);
err_expect:
- kmem_cache_destroy(net->ct.nf_conntrack_cachep);
-err_cache:
- kfree(net->ct.slabname);
-err_slabname:
free_percpu(net->ct.stat);
err_pcpu_lists:
free_percpu(net->ct.pcpu_lists);
--
2.15.1