[Devel] [PATCH 04/17] memcg: revert old oom_guarantee logic
Vladimir Davydov
vdavydov at parallels.com
Fri Aug 14 10:03:28 PDT 2015
This patch reverts the hunks of commit e94e18346f74 ("memcg: add
oom_guarantee") that implement the oom_guarantee logic, since it is
about to be reworked. Note that the memory.oom_guarantee knob itself is
left in place.
Signed-off-by: Vladimir Davydov <vdavydov at parallels.com>
---
 include/linux/memcontrol.h |  6 -----
 include/linux/oom.h        |  2 +-
 mm/memcontrol.c            | 60 ++--------------------------------------------
 mm/oom_kill.c              | 14 ++---------
 4 files changed, 5 insertions(+), 77 deletions(-)
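
[Reviewer note, kept below the "---" cut line so it stays out of the
changelog.] For context, the enforcement being reverted boils down to a
hierarchy walk: a task is shielded from the OOM killer only while its
memcg and every ancestor below the root are under their
memory.oom_guarantee. A toy userspace model of that predicate (plain C;
memcg_model and below_oom_guarantee are illustrative names, not kernel
code):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct mem_cgroup: usage vs. guarantee plus a parent. */
struct memcg_model {
	unsigned long long usage;     /* memsw usage in the kernel */
	unsigned long long guarantee; /* memory.oom_guarantee */
	struct memcg_model *parent;   /* NULL at the root */
};

/*
 * Models the reverted __mem_cgroup_below_oom_guarantee(): a group is
 * protected only if it and every ancestor below the root are strictly
 * under their guarantee; one level at or over its guarantee voids the
 * protection for the whole path.
 */
static bool below_oom_guarantee(const struct memcg_model *memcg)
{
	for (; memcg && memcg->parent; memcg = memcg->parent)
		if (memcg->usage >= memcg->guarantee)
			return false;
	return true;
}

int main(void)
{
	struct memcg_model root   = { 0, 0, NULL };
	struct memcg_model parent = { 300, 200, &root };   /* over its guarantee */
	struct memcg_model child  = {  50, 100, &parent }; /* under its own */

	/* Prints 0: the child loses protection because its parent is over. */
	printf("child protected: %d\n", below_oom_guarantee(&child));
	return 0;
}
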
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ac3f16f0ee28..99f0f74be0af 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -120,7 +120,6 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
-extern bool mem_cgroup_below_oom_guarantee(struct task_struct *p);
extern void mem_cgroup_note_oom_kill(struct mem_cgroup *memcg,
struct task_struct *task);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
@@ -345,11 +344,6 @@ mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
{
}
-static inline bool mem_cgroup_below_oom_guarantee(struct task_struct *p)
-{
- return false;
-}
-
static inline void
mem_cgroup_note_oom_kill(struct mem_cgroup *memcg, struct task_struct *task)
{
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 04f4f579c36c..571b6408bc99 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -77,7 +77,7 @@ extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
- bool force_kill, bool ignore_memcg_guarantee);
+ bool force_kill);
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
int order, nodemask_t *mask, bool force_kill);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0cb329028a29..c96bbc11236c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1577,51 +1577,6 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
return true;
}
-static bool __mem_cgroup_below_oom_guarantee(struct mem_cgroup *root,
- struct mem_cgroup *memcg)
-{
- if (mem_cgroup_disabled())
- return false;
-
- if (memcg == root_mem_cgroup)
- return false;
-
- if (res_counter_read_u64(&memcg->memsw, RES_USAGE) >=
- memcg->oom_guarantee)
- return false;
-
- while (memcg != root) {
- memcg = parent_mem_cgroup(memcg);
- if (!memcg)
- break;
-
- if (memcg == root_mem_cgroup)
- break;
-
- if (res_counter_read_u64(&memcg->memsw, RES_USAGE) >=
- memcg->oom_guarantee)
- return false;
- }
- return true;
-}
-
-bool mem_cgroup_below_oom_guarantee(struct task_struct *p)
-{
- struct mem_cgroup *memcg = NULL;
- bool ret = false;
-
- p = find_lock_task_mm(p);
- if (p) {
- memcg = try_get_mem_cgroup_from_mm(p->mm);
- task_unlock(p);
- }
- if (memcg) {
- ret = __mem_cgroup_below_oom_guarantee(root_mem_cgroup, memcg);
- css_put(&memcg->css);
- }
- return ret;
-}
-
void mem_cgroup_note_oom_kill(struct mem_cgroup *root_memcg,
struct task_struct *task)
{
@@ -1954,7 +1909,6 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned long totalpages;
unsigned int points = 0;
struct task_struct *chosen = NULL;
- bool ignore_memcg_guarantee = false;
/*
* If current has a pending SIGKILL or is exiting, then automatically
@@ -1968,20 +1922,15 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
-retry:
for_each_mem_cgroup_tree(iter, memcg) {
struct cgroup *cgroup = iter->css.cgroup;
struct cgroup_iter it;
struct task_struct *task;
- if (!ignore_memcg_guarantee &&
- __mem_cgroup_below_oom_guarantee(memcg, iter))
- continue;
-
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
switch (oom_scan_process_thread(task, totalpages, NULL,
- false, true)) {
+ false)) {
case OOM_SCAN_SELECT:
if (chosen)
put_task_struct(chosen);
@@ -2012,13 +1961,8 @@ retry:
cgroup_iter_end(cgroup, &it);
}
- if (!chosen) {
- if (!ignore_memcg_guarantee) {
- ignore_memcg_guarantee = true;
- goto retry;
- }
+ if (!chosen)
return;
- }
points = chosen_points * 1000 / totalpages;
oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
NULL, "Memory cgroup out of memory");
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 70893730524a..f598eac397a3 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -266,7 +266,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
unsigned long totalpages, const nodemask_t *nodemask,
- bool force_kill, bool ignore_memcg_guarantee)
+ bool force_kill)
{
if (task->exit_state)
return OOM_SCAN_CONTINUE;
@@ -301,10 +301,6 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
return OOM_SCAN_ABORT;
}
-
- if (!ignore_memcg_guarantee && mem_cgroup_below_oom_guarantee(task))
- return OOM_SCAN_CONTINUE;
-
return OOM_SCAN_OK;
}
@@ -321,15 +317,13 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
unsigned long chosen_points = 0;
- bool ignore_memcg_guarantee = false;
rcu_read_lock();
-retry:
for_each_process_thread(g, p) {
unsigned int points;
switch (oom_scan_process_thread(p, totalpages, nodemask,
- force_kill, ignore_memcg_guarantee)) {
+ force_kill)) {
case OOM_SCAN_SELECT:
chosen = p;
chosen_points = ULONG_MAX;
@@ -350,10 +344,6 @@ retry:
}
if (chosen)
get_task_struct(chosen);
- else if (!ignore_memcg_guarantee) {
- ignore_memcg_guarantee = true;
- goto retry;
- }
rcu_read_unlock();
*ppoints = chosen_points * 1000 / totalpages;
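
[Reviewer note.] What goes away in both select_bad_process() and
mem_cgroup_out_of_memory() is the same two-pass pattern: a first scan
that skips tasks whose memcg path is under its guarantees, and a retry
that ignores the guarantee when the first pass finds no candidate, so
the OOM killer can always make progress. A minimal sketch of that shape
(plain C; task_model and select_victim are hypothetical names, not
kernel code):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical task record for the sketch; not a kernel structure. */
struct task_model {
	bool below_guarantee;  /* task's memcg path is under its guarantees */
	unsigned int points;   /* badness score */
};

/*
 * Shape of the reverted selection loop: honor oom_guarantee on the
 * first pass, then, only if that leaves no candidate, retry with the
 * guarantee ignored so a victim is always found.
 */
static struct task_model *select_victim(struct task_model *tasks, size_t n)
{
	struct task_model *chosen = NULL;
	bool ignore_guarantee = false;
	size_t i;

retry:
	for (i = 0; i < n; i++) {
		if (!ignore_guarantee && tasks[i].below_guarantee)
			continue;  /* skip protected tasks on the first pass */
		if (!chosen || tasks[i].points > chosen->points)
			chosen = &tasks[i];
	}
	if (!chosen && !ignore_guarantee) {
		ignore_guarantee = true;
		goto retry;
	}
	return chosen;
}

int main(void)
{
	struct task_model tasks[] = {
		{ true, 900 },  /* protected, worst offender */
		{ true, 100 },  /* protected, small */
	};

	/* Everything is protected, so the retry pass picks the worst one. */
	return select_victim(tasks, 2) == &tasks[0] ? 0 : 1;
}
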
--
2.1.4