[Devel] [PATCH RHEL7 COMMIT] mm/memcontrol: res_counter -> page_counter transition
Konstantin Khorenko
khorenko at virtuozzo.com
Mon Nov 7 06:03:33 PST 2016
The commit is pushed to "branch-rh7-3.10.0-493.vz7.25.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-493.el7
------>
commit cb9f756091037376b3c8c4689c668d8b853c251f
Author: Andrey Ryabinin <aryabinin at virtuozzo.com>
Date: Mon Nov 7 18:03:33 2016 +0400
mm/memcontrol: res_counter -> page_counter transition
The RHEL7.3 beta kernel has moved from res_counter to page_counter,
so we have to do the same in our code.
https://jira.sw.ru/browse/PSBM-54816
Signed-off-by: Andrey Ryabinin <aryabinin at virtuozzo.com>
---
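A note for readers following the conversion: res_counter accounts in
bytes while page_counter accounts in pages, which is why most hunks
below drop a PAGE_SHIFT conversion along with the rename. The rough
correspondence used throughout this patch:

  res_counter_read_u64(&c, RES_USAGE) >> PAGE_SHIFT     ->  page_counter_read(&c)
  res_counter_read_u64(&c, RES_LIMIT) >> PAGE_SHIFT     ->  c.limit
  res_counter_read_u64(&c, RES_MAX_USAGE) >> PAGE_SHIFT ->  c.watermark
  res_counter_set_limit(&c, bytes)                      ->  page_counter_limit(&c, nr_pages)
  res_counter_reset_max(&c)                             ->  page_counter_reset_watermark(&c)
  res_counter_memparse_write_strategy(buf, &bytes)      ->  page_counter_memparse(buf, &nr_pages)
  RESOURCE_MAX (bytes)                                  ->  PAGE_COUNTER_MAX (pages)

page_counter has no equivalent of res_counter_margin(), so
mem_cgroup_inc_failcnt() below open-codes it. A minimal sketch of that
idiom (the helper name is hypothetical, not part of this patch):

	/* Free room below the limit, in pages; 0 if already over. */
	static unsigned long page_counter_margin(struct page_counter *cnt)
	{
		unsigned long count = page_counter_read(cnt);
		unsigned long limit = ACCESS_ONCE(cnt->limit);

		/* the limit can change under us - clamp instead of wrapping */
		return count < limit ? limit - count : 0;
	}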
include/linux/memcontrol.h | 2 +-
include/net/sock.h | 6 +-
include/net/udp_memcontrol.h | 2 +-
mm/memcontrol.c | 179 ++++++++++++++++++++++---------------------
net/ipv4/udp_memcontrol.c | 93 +++++++++++-----------
5 files changed, 141 insertions(+), 141 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ad3691e..30be0a7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -566,7 +566,7 @@ void __memcg_kmem_put_cache(struct kmem_cache *cachep);
struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr);
int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size);
-void memcg_charge_kmem_nofail(struct mem_cgroup *memcg, u64 size);
+void memcg_charge_kmem_nofail(struct mem_cgroup *memcg, unsigned long nr_pages);
void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size);
/**
diff --git a/include/net/sock.h b/include/net/sock.h
index a8a865d..e3fd61f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,11 +54,7 @@
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#ifdef __GENKSYMS__
-#include <linux/res_counter.h>
-#else
#include <linux/page_counter.h>
-#endif
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/aio.h>
@@ -1250,7 +1246,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
unsigned long amt,
int *parent_status)
{
- memcg_charge_kmem_nofail(prot->memcg, amt << PAGE_SHIFT);
+ memcg_charge_kmem_nofail(prot->memcg, amt);
page_counter_charge(prot->memory_allocated, amt);
if (page_counter_read(prot->memory_allocated) >
diff --git a/include/net/udp_memcontrol.h b/include/net/udp_memcontrol.h
index cb2ba90..875fc2e 100644
--- a/include/net/udp_memcontrol.h
+++ b/include/net/udp_memcontrol.h
@@ -10,7 +10,7 @@
struct udp_memcontrol {
struct cg_proto cg_proto;
- struct res_counter udp_memory_allocated;
+ struct page_counter udp_memory_allocated;
long udp_prot_mem[3];
};
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b5002e2..79a5c58 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -281,8 +281,8 @@ struct mem_cgroup {
unsigned long soft_limit;
/* Normal memory consumption range */
- unsigned long long low;
- unsigned long long high;
+ unsigned long low;
+ unsigned long high;
/* vmpressure notifications */
struct vmpressure vmpressure;
@@ -307,7 +307,7 @@ struct mem_cgroup {
atomic_long_t oom_kill_cnt;
struct oom_context oom_ctx;
- unsigned long long oom_guarantee;
+ unsigned long oom_guarantee;
/*
* Should the accounting and control be hierarchical, per subtree?
@@ -958,8 +958,8 @@ static void mem_cgroup_update_swap_max(struct mem_cgroup *memcg)
long long swap;
for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- swap = res_counter_read_u64(&memcg->memsw, RES_USAGE) -
- res_counter_read_u64(&memcg->res, RES_USAGE);
+ swap = page_counter_read(&memcg->memsw) -
+ page_counter_read(&memcg->memory);
/* This is racy, but we don't have to be absolutely precise */
if (swap > (long long)memcg->swap_max)
@@ -970,12 +970,20 @@ static void mem_cgroup_update_swap_max(struct mem_cgroup *memcg)
static void mem_cgroup_inc_failcnt(struct mem_cgroup *memcg,
gfp_t gfp_mask, unsigned int nr_pages)
{
+ unsigned long margin = 0;
+ unsigned long count;
+ unsigned long limit;
+
if (gfp_mask & __GFP_NOWARN)
return;
atomic_long_inc(&memcg->mem_failcnt);
- if (do_swap_account &&
- res_counter_margin(&memcg->memsw) < nr_pages * PAGE_SIZE)
+ count = page_counter_read(&memcg->memsw);
+ limit = ACCESS_ONCE(memcg->memsw.limit);
+ if (count < limit)
+ margin = limit - count;
+
+ if (do_swap_account && margin < nr_pages)
atomic_long_inc(&memcg->swap_failcnt);
}
@@ -1625,7 +1633,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
if (memcg == root_mem_cgroup)
return false;
- if (res_counter_read_u64(&memcg->res, RES_USAGE) >= memcg->low)
+ if (page_counter_read(&memcg->memory) >= memcg->low)
return false;
/*
@@ -1654,7 +1662,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
if (memcg == root_mem_cgroup)
break;
- if (res_counter_read_u64(&memcg->res, RES_USAGE) >= memcg->low)
+ if (page_counter_read(&memcg->memory) >= memcg->low)
return false;
}
return true;
@@ -1731,19 +1739,16 @@ unsigned long mem_cgroup_overdraft(struct mem_cgroup *memcg)
return 0;
guarantee = ACCESS_ONCE(memcg->oom_guarantee);
- usage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ usage = page_counter_read(&memcg->memsw);
return div64_u64(1000 * usage, guarantee + 1);
}
unsigned long mem_cgroup_total_pages(struct mem_cgroup *memcg, bool swap)
{
- unsigned long long limit;
+ unsigned long limit;
- limit = swap ? res_counter_read_u64(&memcg->memsw, RES_LIMIT) :
- res_counter_read_u64(&memcg->res, RES_LIMIT);
- if (limit >= RESOURCE_MAX)
- return ULONG_MAX;
- return min_t(unsigned long long, ULONG_MAX, limit >> PAGE_SHIFT);
+ limit = swap ? memcg->memsw.limit : memcg->memory.limit;
+ return min_t(unsigned long, PAGE_COUNTER_MAX, limit);
}
#define mem_cgroup_from_counter(counter, member) \
@@ -2967,9 +2972,9 @@ again:
* successful charge implies full memory barrier.
*/
if (unlikely(memcg->is_offline)) {
- res_counter_uncharge(&memcg->res, batch * PAGE_SIZE);
+ page_counter_uncharge(&memcg->memory, batch);
if (do_swap_account)
- res_counter_uncharge(&memcg->memsw, batch * PAGE_SIZE);
+ page_counter_uncharge(&memcg->memsw, batch);
css_put(&memcg->css);
goto bypass;
}
@@ -2985,7 +2990,7 @@ again:
do {
if (!(gfp_mask & __GFP_WAIT))
break;
- if (res_counter_read_u64(&iter->res, RES_USAGE) <= iter->high)
+ if (page_counter_read(&iter->memory) <= iter->high)
continue;
try_to_free_mem_cgroup_pages(iter, nr_pages, gfp_mask, false);
} while ((iter = parent_mem_cgroup(iter)));
@@ -3202,6 +3207,17 @@ int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
return ret;
}
+void memcg_charge_kmem_nofail(struct mem_cgroup *memcg, unsigned long nr_pages)
+{
+ page_counter_charge(&memcg->memory, nr_pages);
+ if (do_swap_account)
+ page_counter_charge(&memcg->memsw, nr_pages);
+
+ /* kmem must be charged after memory - see memcg_charge_kmem() */
+ page_counter_charge(&memcg->kmem, nr_pages);
+}
+
+
void memcg_uncharge_kmem(struct mem_cgroup *memcg,
unsigned long nr_pages)
{
@@ -4998,11 +5014,10 @@ int mem_cgroup_enough_memory(struct mem_cgroup *memcg, long pages)
long free;
/* unused memory */
- free = (res_counter_read_u64(&memcg->memsw, RES_LIMIT) -
- res_counter_read_u64(&memcg->memsw, RES_USAGE)) >> PAGE_SHIFT;
+ free = memcg->memsw.limit - page_counter_read(&memcg->memsw);
/* reclaimable slabs */
- free += res_counter_read_u64(&memcg->dcache, RES_USAGE) >> PAGE_SHIFT;
+ free += page_counter_read(&memcg->dcache);
/* assume file cache is reclaimable */
free += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
@@ -5229,7 +5244,8 @@ static ssize_t mem_cgroup_low_read(struct cgroup *cont, struct cftype *cft,
char str[64];
int len;
- len = scnprintf(str, sizeof(str), "%llu\n", memcg->low);
+ len = scnprintf(str, sizeof(str), "%llu\n",
+ ((unsigned long long)memcg->low) << PAGE_SHIFT);
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
@@ -5237,14 +5253,14 @@ static int mem_cgroup_low_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- unsigned long long val;
+ unsigned long nr_pages;
int ret;
- ret = res_counter_memparse_write_strategy(buffer, &val);
+ ret = page_counter_memparse(buffer, &nr_pages);
if (ret)
return ret;
- memcg->low = val;
+ memcg->low = nr_pages;
return 0;
}
@@ -5256,7 +5272,8 @@ static ssize_t mem_cgroup_high_read(struct cgroup *cont, struct cftype *cft,
char str[64];
int len;
- len = scnprintf(str, sizeof(str), "%llu\n", memcg->high);
+ len = scnprintf(str, sizeof(str), "%llu\n",
+ ((unsigned long long)memcg->high) << PAGE_SHIFT);
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
@@ -5264,19 +5281,18 @@ static int mem_cgroup_high_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- unsigned long long val, usage;
+ unsigned long nr_pages, usage;
int ret;
- ret = res_counter_memparse_write_strategy(buffer, &val);
+ ret = page_counter_memparse(buffer, &nr_pages);
if (ret)
return ret;
- memcg->high = val;
+ memcg->high = nr_pages;
- usage = res_counter_read_u64(&memcg->res, RES_USAGE);
- if (usage > val)
- try_to_free_mem_cgroup_pages(memcg,
- (usage - val) >> PAGE_SHIFT,
+ usage = page_counter_read(&memcg->memory);
+ if (usage > nr_pages)
+ try_to_free_mem_cgroup_pages(memcg, usage - nr_pages,
GFP_KERNEL, false);
return 0;
}
@@ -5289,7 +5305,8 @@ static ssize_t mem_cgroup_oom_guarantee_read(struct cgroup *cont,
char str[64];
int len;
- len = scnprintf(str, sizeof(str), "%llu\n", memcg->oom_guarantee);
+ len = scnprintf(str, sizeof(str), "%llu\n",
+ ((unsigned long long)memcg->oom_guarantee) << PAGE_SHIFT);
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
@@ -5297,14 +5314,14 @@ static int mem_cgroup_oom_guarantee_write(struct cgroup *cont,
struct cftype *cft, const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- unsigned long long val;
+ unsigned long nr_pages;
int ret;
- ret = res_counter_memparse_write_strategy(buffer, &val);
+ ret = page_counter_memparse(buffer, &nr_pages);
if (ret)
return ret;
- memcg->oom_guarantee = val;
+ memcg->oom_guarantee = nr_pages;
return 0;
}
@@ -5337,10 +5354,10 @@ static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
struct cgroup *cgroup;
- unsigned long long min_limit, min_memsw_limit, tmp;
+ unsigned long min_limit, min_memsw_limit, tmp;
- min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
- min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ min_limit = memcg->memory.limit;
+ min_memsw_limit = memcg->memsw.limit;
cgroup = memcg->css.cgroup;
if (!memcg->use_hierarchy)
goto out;
@@ -5350,9 +5367,9 @@ static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
memcg = mem_cgroup_from_cont(cgroup);
if (!memcg->use_hierarchy)
break;
- tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
+ tmp = memcg->memory.limit;
min_limit = min(min_limit, tmp);
- tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ tmp = memcg->memsw.limit;
min_memsw_limit = min(min_memsw_limit, tmp);
}
out:
@@ -5433,7 +5450,7 @@ void mem_cgroup_sync_beancounter(struct mem_cgroup *memcg,
struct user_beancounter *ub)
{
struct mem_cgroup *mi;
- unsigned long long lim, held, maxheld;
+ unsigned long lim, held, maxheld;
volatile struct ubparm *k, *d, *p, *s, *o;
k = &ub->ub_parms[UB_KMEMSIZE];
@@ -5442,34 +5459,35 @@ void mem_cgroup_sync_beancounter(struct mem_cgroup *memcg,
s = &ub->ub_parms[UB_SWAPPAGES];
o = &ub->ub_parms[UB_OOMGUARPAGES];
- p->held = res_counter_read_u64(&memcg->res, RES_USAGE) >> PAGE_SHIFT;
- p->maxheld = res_counter_read_u64(&memcg->res, RES_MAX_USAGE) >> PAGE_SHIFT;
+ p->held = page_counter_read(&memcg->memory);
+ p->maxheld = memcg->memory.watermark;
p->failcnt = atomic_long_read(&memcg->mem_failcnt);
- lim = res_counter_read_u64(&memcg->res, RES_LIMIT);
- lim = lim >= RESOURCE_MAX ? UB_MAXVALUE :
- min_t(unsigned long long, lim >> PAGE_SHIFT, UB_MAXVALUE);
+ lim = memcg->memory.limit;
+ lim = lim >= PAGE_COUNTER_MAX ? UB_MAXVALUE :
+ min_t(unsigned long, lim, UB_MAXVALUE);
p->barrier = p->limit = lim;
- k->held = res_counter_read_u64(&memcg->kmem, RES_USAGE);
- k->maxheld = res_counter_read_u64(&memcg->kmem, RES_MAX_USAGE);
- k->failcnt = res_counter_read_u64(&memcg->kmem, RES_FAILCNT);
- lim = res_counter_read_u64(&memcg->kmem, RES_LIMIT);
- lim = lim >= RESOURCE_MAX ? UB_MAXVALUE :
+ /* XXX: kmemsize and dcachesize are reported to UB in bytes, not pages */
+ k->held = page_counter_read(&memcg->kmem) << PAGE_SHIFT;
+ k->maxheld = memcg->kmem.watermark << PAGE_SHIFT;
+ k->failcnt = memcg->kmem.failcnt;
+ lim = memcg->kmem.limit << PAGE_SHIFT;
+ lim = lim >= (PAGE_COUNTER_MAX << PAGE_SHIFT) ? UB_MAXVALUE :
min_t(unsigned long long, lim, UB_MAXVALUE);
k->barrier = k->limit = lim;
- d->held = res_counter_read_u64(&memcg->dcache, RES_USAGE);
- d->maxheld = res_counter_read_u64(&memcg->dcache, RES_MAX_USAGE);
+ d->held = page_counter_read(&memcg->dcache) << PAGE_SHIFT;
+ d->maxheld = memcg->dcache.watermark << PAGE_SHIFT;
d->failcnt = 0;
d->barrier = d->limit = UB_MAXVALUE;
- held = (res_counter_read_u64(&memcg->memsw, RES_USAGE) -
- res_counter_read_u64(&memcg->res, RES_USAGE)) >> PAGE_SHIFT;
- maxheld = memcg->swap_max >> PAGE_SHIFT;
+ held = page_counter_read(&memcg->memsw) -
+ page_counter_read(&memcg->memory);
+ maxheld = memcg->swap_max;
s->failcnt = atomic_long_read(&memcg->swap_failcnt);
- lim = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
- lim = lim >= RESOURCE_MAX ? UB_MAXVALUE :
- min_t(unsigned long long, lim >> PAGE_SHIFT, UB_MAXVALUE);
+ lim = memcg->memsw.limit;
+ lim = lim >= PAGE_COUNTER_MAX ? UB_MAXVALUE :
+	min_t(unsigned long long, lim, UB_MAXVALUE);
if (lim != UB_MAXVALUE)
lim -= p->limit;
s->barrier = s->limit = lim;
@@ -5479,11 +5497,11 @@ void mem_cgroup_sync_beancounter(struct mem_cgroup *memcg,
s->held = min(held, lim);
s->maxheld = min(maxheld, lim);
- o->held = res_counter_read_u64(&memcg->memsw, RES_USAGE) >> PAGE_SHIFT;
- o->maxheld = res_counter_read_u64(&memcg->memsw, RES_MAX_USAGE) >> PAGE_SHIFT;
+ o->held = page_counter_read(&memcg->memsw);
+ o->maxheld = memcg->memsw.watermark;
o->failcnt = atomic_long_read(&memcg->oom_kill_cnt);
lim = memcg->oom_guarantee;
- lim = lim >= RESOURCE_MAX ? UB_MAXVALUE :
-	min_t(unsigned long long, lim >> PAGE_SHIFT, UB_MAXVALUE);
+ lim = lim >= PAGE_COUNTER_MAX ? UB_MAXVALUE :
+	min_t(unsigned long long, lim, UB_MAXVALUE);
o->barrier = o->limit = lim;
@@ -5505,26 +5523,13 @@ int mem_cgroup_apply_beancounter(struct mem_cgroup *memcg,
return -EPERM;
mem = ub->ub_parms[UB_PHYSPAGES].limit;
- if (mem < RESOURCE_MAX >> PAGE_SHIFT)
- mem <<= PAGE_SHIFT;
- else
- mem = RESOURCE_MAX;
-
memsw = ub->ub_parms[UB_SWAPPAGES].limit;
- if (memsw < RESOURCE_MAX >> PAGE_SHIFT)
- memsw <<= PAGE_SHIFT;
- else
- memsw = RESOURCE_MAX;
- if (memsw < RESOURCE_MAX - mem)
+ if (memsw < PAGE_COUNTER_MAX - mem)
memsw += mem;
else
- memsw = RESOURCE_MAX;
+ memsw = PAGE_COUNTER_MAX;
oomguar = ub->ub_parms[UB_OOMGUARPAGES].barrier;
- if (oomguar < RESOURCE_MAX >> PAGE_SHIFT)
- oomguar <<= PAGE_SHIFT;
- else
- oomguar = RESOURCE_MAX;
if (ub->ub_parms[UB_KMEMSIZE].limit != UB_MAXVALUE)
pr_warn_once("ub: kmemsize limit is deprecated\n");
@@ -5532,27 +5537,27 @@ int mem_cgroup_apply_beancounter(struct mem_cgroup *memcg,
pr_warn_once("ub: dcachesize limit is deprecated\n");
/* activate kmem accounting */
- ret = memcg_update_kmem_limit(cg, RESOURCE_MAX);
+ ret = memcg_update_kmem_limit(cg, PAGE_COUNTER_MAX);
if (ret)
goto out;
/* try change mem+swap before changing mem limit */
- if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) != memsw)
+ if (memcg->memsw.limit != memsw)
(void)mem_cgroup_resize_memsw_limit(memcg, memsw);
- if (res_counter_read_u64(&memcg->res, RES_LIMIT) != mem) {
+ if (memcg->memory.limit != mem) {
ret = mem_cgroup_resize_limit(memcg, mem);
if (ret)
goto out;
}
- mem_old = res_counter_read_u64(&memcg->res, RES_LIMIT);
- memsw_old = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ mem_old = memcg->memory.limit;
+ memsw_old = memcg->memsw.limit;
if (mem != mem_old) {
/* first, reset memsw limit since it cannot be < mem limit */
- if (memsw_old < RESOURCE_MAX) {
- memsw_old = RESOURCE_MAX;
+ if (memsw_old < PAGE_COUNTER_MAX) {
+ memsw_old = PAGE_COUNTER_MAX;
ret = mem_cgroup_resize_memsw_limit(memcg, memsw_old);
if (ret)
goto out;
diff --git a/net/ipv4/udp_memcontrol.c b/net/ipv4/udp_memcontrol.c
index 68818e1..7461810 100644
--- a/net/ipv4/udp_memcontrol.c
+++ b/net/ipv4/udp_memcontrol.c
@@ -27,11 +27,11 @@ static inline struct udp_memcontrol *udp_from_cgproto(struct cg_proto *cg_proto)
int udp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
/*
- * The root cgroup does not use res_counters, but rather,
+ * The root cgroup does not use page_counters, but rather,
* rely on the data already collected by the network
* subsystem
*/
- struct res_counter *res_parent = NULL;
+ struct page_counter *counter_parent = NULL;
struct cg_proto *cg_proto, *parent_cg;
struct udp_memcontrol *udp;
struct mem_cgroup *parent = parent_mem_cgroup(memcg);
@@ -48,9 +48,9 @@ int udp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
parent_cg = udp_prot.proto_cgroup(parent);
if (parent_cg)
- res_parent = parent_cg->memory_allocated;
+ counter_parent = parent_cg->memory_allocated;
- res_counter_init(&udp->udp_memory_allocated, res_parent);
+ page_counter_init(&udp->udp_memory_allocated, counter_parent);
cg_proto->sysctl_mem = udp->udp_prot_mem;
cg_proto->memory_allocated = &udp->udp_memory_allocated;
@@ -63,7 +63,7 @@ void udp_destroy_cgroup(struct mem_cgroup *memcg)
{
}
-static int udp_update_limit(struct mem_cgroup *memcg, u64 val)
+static int udp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages)
{
struct udp_memcontrol *udp;
struct cg_proto *cg_proto;
@@ -75,22 +75,19 @@ static int udp_update_limit(struct mem_cgroup *memcg, u64 val)
if (!cg_proto)
return -EINVAL;
- if (val > RESOURCE_MAX)
- val = RESOURCE_MAX;
-
udp = udp_from_cgproto(cg_proto);
- old_lim = res_counter_read_u64(&udp->udp_memory_allocated, RES_LIMIT);
- ret = res_counter_set_limit(&udp->udp_memory_allocated, val);
+ old_lim = udp->udp_memory_allocated.limit;
+ ret = page_counter_limit(&udp->udp_memory_allocated, nr_pages);
if (ret)
return ret;
for (i = 0; i < 3; i++)
- udp->udp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT, sysctl_udp_mem[i]);
+ udp->udp_prot_mem[i] = min_t(long, nr_pages, sysctl_udp_mem[i]);
- if (val == RESOURCE_MAX)
+ if (nr_pages == PAGE_COUNTER_MAX)
clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
- else if (val != RESOURCE_MAX) {
+ else {
if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
static_key_slow_inc(&memcg_socket_limit_enabled);
set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
@@ -99,20 +96,32 @@ static int udp_update_limit(struct mem_cgroup *memcg, u64 val)
return 0;
}
+enum {
+ RES_USAGE,
+ RES_LIMIT,
+ RES_MAX_USAGE,
+ RES_FAILCNT,
+};
+
+static DEFINE_MUTEX(udp_limit_mutex);
+
static int udp_cgroup_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- unsigned long long val;
+ unsigned long nr_pages;
int ret = 0;
switch (cft->private) {
case RES_LIMIT:
/* see memcontrol.c */
- ret = res_counter_memparse_write_strategy(buffer, &val);
+ ret = page_counter_memparse(buffer, &nr_pages);
if (ret)
break;
- ret = udp_update_limit(memcg, val);
+
+ mutex_lock(&udp_limit_mutex);
+ ret = udp_update_limit(memcg, nr_pages);
+ mutex_unlock(&udp_limit_mutex);
break;
default:
ret = -EINVAL;
@@ -121,47 +130,37 @@ static int udp_cgroup_write(struct cgroup *cont, struct cftype *cft,
return ret;
}
-static u64 udp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
-{
- struct udp_memcontrol *udp;
- struct cg_proto *cg_proto;
-
- cg_proto = udp_prot.proto_cgroup(memcg);
- if (!cg_proto)
- return default_val;
-
- udp = udp_from_cgproto(cg_proto);
- return res_counter_read_u64(&udp->udp_memory_allocated, type);
-}
-
-static u64 udp_read_usage(struct mem_cgroup *memcg)
-{
- struct udp_memcontrol *udp;
- struct cg_proto *cg_proto;
-
- cg_proto = udp_prot.proto_cgroup(memcg);
- if (!cg_proto)
- return atomic_long_read(&udp_memory_allocated) << PAGE_SHIFT;
-
- udp = udp_from_cgproto(cg_proto);
- return res_counter_read_u64(&udp->udp_memory_allocated, RES_USAGE);
-}
-
static u64 udp_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct cg_proto *cg_proto = udp_prot.proto_cgroup(memcg);
+
u64 val;
switch (cft->private) {
case RES_LIMIT:
- val = udp_read_stat(memcg, RES_LIMIT, RESOURCE_MAX);
+ if (!cg_proto)
+ return PAGE_COUNTER_MAX;
+ val = cg_proto->memory_allocated->limit;
+ val *= PAGE_SIZE;
break;
case RES_USAGE:
- val = udp_read_usage(memcg);
+ if (!cg_proto)
+ val = atomic_long_read(&udp_memory_allocated);
+ else
+ val = page_counter_read(cg_proto->memory_allocated);
+ val *= PAGE_SIZE;
break;
case RES_FAILCNT:
+ if (!cg_proto)
+ return 0;
+ val = cg_proto->memory_allocated->failcnt;
+ break;
case RES_MAX_USAGE:
- val = udp_read_stat(memcg, cft->private, 0);
+ if (!cg_proto)
+ return 0;
+ val = cg_proto->memory_allocated->watermark;
+ val *= PAGE_SIZE;
break;
default:
BUG();
@@ -183,10 +182,10 @@ static int udp_cgroup_reset(struct cgroup *cont, unsigned int event)
switch (event) {
case RES_MAX_USAGE:
- res_counter_reset_max(&udp->udp_memory_allocated);
+ page_counter_reset_watermark(&udp->udp_memory_allocated);
break;
case RES_FAILCNT:
- res_counter_reset_failcnt(&udp->udp_memory_allocated);
+ udp->udp_memory_allocated.failcnt = 0;
break;
}