memcg: avoid oom during moving charge
The move-charge-at-task-migration feature holds extra charges on "to"
(pre-charges) and on "from" (left-over charges) while a charge move is in
progress, so other tasks can hit an unnecessary oom.

This patch avoids such an oom: while a move is in progress, a task that would
otherwise invoke the oom killer against a memcg involved in the move waits on
mc.waitq and retries the charge after the move completes.
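
As a rough illustration only (hypothetical names, not the patch itself), the
waiter/mover handshake the patch relies on is a standard waitqueue pattern:
contending chargers sleep on a waitqueue while a move is in flight, and the
mover wakes them when it is done.

/*
 * Minimal sketch of the handshake, assuming kernel code with
 * <linux/wait.h> and <linux/sched.h> available.  "move_waitq",
 * "moving_task", "wait_for_move" and "do_move_charges" are
 * illustrative names only.
 */
static DECLARE_WAIT_QUEUE_HEAD(move_waitq);
static struct task_struct *moving_task;	/* NULL when no move is in flight */

/* charge path: back off instead of declaring oom while a move is active */
static void wait_for_move(void)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&move_waitq, &wait, TASK_INTERRUPTIBLE);
	/* the move may already have finished; only sleep if it has not */
	if (moving_task)
		schedule();
	finish_wait(&move_waitq, &wait);
}

/* move path: publish the mover, do the work, then release the waiters */
static void do_move_charges(void)
{
	moving_task = current;
	/* ... precharge "to", walk the task's pages, uncharge "from" ... */
	moving_task = NULL;
	wake_up_all(&move_waitq);
}

After waking, a charger simply retries; if the memcg is still over its limit
for unrelated reasons, the normal reclaim and oom path applies.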

Signed-off-by: Daisuke Nishimura <[email protected]>
Cc: Balbir Singh <[email protected]>
Acked-by: KAMEZAWA Hiroyuki <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Paul Menage <[email protected]>
Cc: Daisuke Nishimura <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Daisuke Nishimura authored and torvalds committed Mar 12, 2010
1 parent 854ffa8 commit 8033b97
Showing 1 changed file with 51 additions and 2 deletions.
53 changes: 51 additions & 2 deletions mm/memcontrol.c
@@ -254,7 +254,11 @@ static struct move_charge_struct {
 	struct mem_cgroup *to;
 	unsigned long precharge;
 	unsigned long moved_charge;
-} mc;
+	struct task_struct *moving_task;	/* a task moving charges */
+	wait_queue_head_t waitq;		/* a waitq for other context */
+} mc = {
+	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
+};
 
 /*
  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
@@ -1508,6 +1512,48 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
 		if (mem_cgroup_check_under_limit(mem_over_limit))
 			continue;
 
+		/* try to avoid oom while someone is moving charge */
+		if (mc.moving_task && current != mc.moving_task) {
+			struct mem_cgroup *from, *to;
+			bool do_continue = false;
+			/*
+			 * There is a small race that "from" or "to" can be
+			 * freed by rmdir, so we use css_tryget().
+			 */
+			rcu_read_lock();
+			from = mc.from;
+			to = mc.to;
+			if (from && css_tryget(&from->css)) {
+				if (mem_over_limit->use_hierarchy)
+					do_continue = css_is_ancestor(
+							&from->css,
+							&mem_over_limit->css);
+				else
+					do_continue = (from == mem_over_limit);
+				css_put(&from->css);
+			}
+			if (!do_continue && to && css_tryget(&to->css)) {
+				if (mem_over_limit->use_hierarchy)
+					do_continue = css_is_ancestor(
+							&to->css,
+							&mem_over_limit->css);
+				else
+					do_continue = (to == mem_over_limit);
+				css_put(&to->css);
+			}
+			rcu_read_unlock();
+			if (do_continue) {
+				DEFINE_WAIT(wait);
+				prepare_to_wait(&mc.waitq, &wait,
+						TASK_INTERRUPTIBLE);
+				/* moving charge context might have finished. */
+				if (mc.moving_task)
+					schedule();
+				finish_wait(&mc.waitq, &wait);
+				continue;
+			}
+		}
+
 		if (!nr_retries--) {
 			if (oom) {
 				mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
@@ -3381,7 +3427,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 			INIT_WORK(&stock->work, drain_local_stock);
 		}
 		hotcpu_notifier(memcg_stock_cpu_callback, 0);
-
 	} else {
 		parent = mem_cgroup_from_cont(cont->parent);
 		mem->use_hierarchy = parent->use_hierarchy;
@@ -3641,6 +3686,8 @@ static void mem_cgroup_clear_mc(void)
 	}
 	mc.from = NULL;
 	mc.to = NULL;
+	mc.moving_task = NULL;
+	wake_up_all(&mc.waitq);
 }
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
@@ -3666,10 +3713,12 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 		VM_BUG_ON(mc.to);
 		VM_BUG_ON(mc.precharge);
 		VM_BUG_ON(mc.moved_charge);
+		VM_BUG_ON(mc.moving_task);
 		mc.from = from;
 		mc.to = mem;
 		mc.precharge = 0;
 		mc.moved_charge = 0;
+		mc.moving_task = current;
 
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
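The same involvement test is performed twice in the __mem_cgroup_try_charge
hunk above, once for mc.from and once for mc.to. Purely as a readability aid
(not part of the commit), the check could be factored into a helper along
these lines, assuming it sits in mm/memcontrol.c next to the mc structure so
that css_tryget(), css_is_ancestor() and mc itself are visible;
limit_hit_inside_move is an illustrative name only.

/* Is the limit being hit in a memcg that the pending move charges against? */
static bool limit_hit_inside_move(struct mem_cgroup *mem_over_limit)
{
	struct mem_cgroup *memcgs[2];
	bool hit = false;
	int i;

	/* mc.from/mc.to may be freed by a concurrent rmdir; peek under RCU. */
	rcu_read_lock();
	memcgs[0] = mc.from;
	memcgs[1] = mc.to;
	for (i = 0; i < 2 && !hit; i++) {
		struct mem_cgroup *memcg = memcgs[i];

		if (!memcg || !css_tryget(&memcg->css))
			continue;
		if (mem_over_limit->use_hierarchy)
			hit = css_is_ancestor(&memcg->css,
					      &mem_over_limit->css);
		else
			hit = (memcg == mem_over_limit);
		css_put(&memcg->css);
	}
	rcu_read_unlock();
	return hit;
}

A waiter would then sleep whenever limit_hit_inside_move(mem_over_limit) is
true, which is the do_continue condition the hunk builds up inline.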
