Skip to content

Commit

Permalink
mm: memcontrol: implement lruvec stat functions on top of each other
Browse files Browse the repository at this point in the history
The implementation of the lruvec stat functions and their variants for
accounting through a page, or accounting from a preemptible context, are
mostly identical and needlessly repetitive.

Implement the lruvec_page functions by looking up the page's lruvec and
then using the lruvec function.

Implement the functions for preemptible contexts by disabling preemption
before calling the atomic context functions.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Vladimir Davydov <[email protected]>
Cc: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
hnaz authored and torvalds committed Feb 1, 2018
1 parent c9019e9 commit 2845426
Showing 1 changed file with 22 additions and 22 deletions.
44 changes: 22 additions & 22 deletions include/linux/memcontrol.h
Original file line number Diff line number Diff line change
Expand Up @@ -569,51 +569,51 @@ static inline void __mod_lruvec_state(struct lruvec *lruvec,
{
struct mem_cgroup_per_node *pn;

/* Update node */
__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

if (mem_cgroup_disabled())
return;

pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

/* Update memcg */
__mod_memcg_state(pn->memcg, idx, val);

/* Update lruvec */
__this_cpu_add(pn->lruvec_stat->count[idx], val);
}

/*
 * mod_lruvec_state - update a per-lruvec statistic from preemptible context
 * @lruvec: the lruvec whose counter to modify
 * @idx: the node stat item being updated
 * @val: signed delta to apply
 *
 * Preemptible-context wrapper: disables preemption and delegates to
 * __mod_lruvec_state() rather than duplicating its three-level update.
 */
static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	preempt_disable();
	__mod_lruvec_state(lruvec, idx, val);
	preempt_enable();
}

/*
 * __mod_lruvec_page_state - update lruvec statistics through a page
 * @page: the page whose node/memcg/lruvec counters to modify
 * @idx: the node stat item being updated
 * @val: signed delta to apply
 *
 * Looks up the page's lruvec and delegates to __mod_lruvec_state().
 * Caller must have preemption disabled; use mod_lruvec_page_state()
 * from preemptible contexts.
 */
static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

/*
 * mod_lruvec_page_state - update lruvec statistics through a page,
 *			   from preemptible context
 * @page: the page whose node/memcg/lruvec counters to modify
 * @idx: the node stat item being updated
 * @val: signed delta to apply
 *
 * Preemptible-context wrapper: disables preemption and delegates to
 * __mod_lruvec_page_state() rather than duplicating its logic.
 */
static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	preempt_disable();
	__mod_lruvec_page_state(page, idx, val);
	preempt_enable();
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
Expand Down

0 comments on commit 2845426

Please sign in to comment.