vmscan: Use an indexed array for LRU variables
Currently we are defining explicit variables for the inactive and active
lists.  An indexed array can be more generic and avoid repeating similar
code in several places in the reclaim code.

We are saving a few bytes in terms of code size:

Before:

   text    data     bss     dec     hex filename
4097753  573120 4092484 8763357  85b7dd vmlinux

After:

   text    data     bss     dec     hex filename
4097729  573120 4092484 8763333  85b7c5 vmlinux

Having an easy way to add new lru lists may ease future work on the
reclaim code.

Signed-off-by: Rik van Riel <[email protected]>
Signed-off-by: Lee Schermerhorn <[email protected]>
Signed-off-by: Christoph Lameter <[email protected]>
Signed-off-by: KOSAKI Motohiro <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Christoph Lameter authored and torvalds committed Oct 20, 2008
1 parent 62695a8 commit b69408e
Showing 8 changed files with 171 additions and 170 deletions.
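Before the per-file hunks, a compressed illustration of the pattern being introduced may help. The snippet below is a standalone userspace sketch, not kernel code: the struct and helper names are stand-ins, and a plain page counter replaces the real struct list_head, but the shape matches the patch: one enum indexes an array, so code that was previously written once for the active list and once for the inactive list collapses into a single helper or loop.

#include <stdio.h>

/* Mirrors enum lru_list from the patch: names index into an array of LRU lists. */
enum lru_list {
	LRU_BASE,
	LRU_INACTIVE = LRU_BASE,
	LRU_ACTIVE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

/* Toy stand-in for struct zone: one array slot per LRU list instead of
 * separate active_list/inactive_list members (nr_pages stands in for the
 * real struct list_head). */
struct zone {
	struct {
		unsigned long nr_pages;
		unsigned long nr_scan;
	} lru[NR_LRU_LISTS];
};

/* A single helper replaces the add_page_to_active_list/_inactive_list pair. */
static void add_page(struct zone *zone, enum lru_list l)
{
	zone->lru[l].nr_pages++;
}

int main(void)
{
	struct zone z = { 0 };
	enum lru_list l;

	add_page(&z, LRU_ACTIVE);
	add_page(&z, LRU_INACTIVE);
	add_page(&z, LRU_INACTIVE);

	for_each_lru(l)
		printf("lru %d: %lu pages\n", (int)l, z.lru[l].nr_pages);
	return 0;
}

In the kernel hunks below, the same idea is carried through the vmstat counters: NR_LRU_BASE is declared so that NR_LRU_BASE + LRU_INACTIVE and NR_LRU_BASE + LRU_ACTIVE land on NR_INACTIVE and NR_ACTIVE, which is why both enums carry a "must match order" comment.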
include/linux/memcontrol.h (5 additions, 12 deletions)
@@ -69,10 +69,8 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
int priority);

extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
struct zone *zone, int priority);
extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
struct zone *zone, int priority);
extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
int priority, enum lru_list lru);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
static inline void page_reset_bad_cgroup(struct page *page)
@@ -159,14 +157,9 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
{
}

static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
struct zone *zone, int priority)
{
return 0;
}

static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
struct zone *zone, int priority)
static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
struct zone *zone, int priority,
enum lru_list lru)
{
return 0;
}
include/linux/mm_inline.h (38 additions, 11 deletions)
@@ -1,40 +1,67 @@
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_add(&page->lru, &zone->lru[l].list);
__inc_zone_state(zone, NR_LRU_BASE + l);
}

static inline void
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
list_del(&page->lru);
__dec_zone_state(zone, NR_LRU_BASE + l);
}

static inline void
add_page_to_active_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->active_list);
__inc_zone_state(zone, NR_ACTIVE);
add_page_to_lru_list(zone, page, LRU_ACTIVE);
}

static inline void
add_page_to_inactive_list(struct zone *zone, struct page *page)
{
list_add(&page->lru, &zone->inactive_list);
__inc_zone_state(zone, NR_INACTIVE);
add_page_to_lru_list(zone, page, LRU_INACTIVE);
}

static inline void
del_page_from_active_list(struct zone *zone, struct page *page)
{
list_del(&page->lru);
__dec_zone_state(zone, NR_ACTIVE);
del_page_from_lru_list(zone, page, LRU_ACTIVE);
}

static inline void
del_page_from_inactive_list(struct zone *zone, struct page *page)
{
list_del(&page->lru);
__dec_zone_state(zone, NR_INACTIVE);
del_page_from_lru_list(zone, page, LRU_INACTIVE);
}

static inline void
del_page_from_lru(struct zone *zone, struct page *page)
{
enum lru_list l = LRU_INACTIVE;

list_del(&page->lru);
if (PageActive(page)) {
__ClearPageActive(page);
__dec_zone_state(zone, NR_ACTIVE);
} else {
__dec_zone_state(zone, NR_INACTIVE);
l = LRU_ACTIVE;
}
__dec_zone_state(zone, NR_LRU_BASE + l);
}

/**
* page_lru - which LRU list should a page be on?
* @page: the page to test
*
* Returns the LRU list a page should be on, as an index
* into the array of LRU lists.
*/
static inline enum lru_list page_lru(struct page *page)
{
enum lru_list lru = LRU_BASE;

if (PageActive(page))
lru += LRU_ACTIVE;

return lru;
}
include/linux/mmzone.h (20 additions, 6 deletions)
@@ -81,8 +81,9 @@ struct zone_padding {
enum zone_stat_item {
/* First 128 byte cacheline (assuming 64 bit words) */
NR_FREE_PAGES,
NR_INACTIVE,
NR_ACTIVE,
NR_LRU_BASE,
NR_INACTIVE = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
NR_ACTIVE, /* " " " " " */
NR_ANON_PAGES, /* Mapped anonymous pages */
NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
only modified from process context */
@@ -107,6 +108,19 @@ enum zone_stat_item {
#endif
NR_VM_ZONE_STAT_ITEMS };

enum lru_list {
LRU_BASE,
LRU_INACTIVE=LRU_BASE, /* must match order of NR_[IN]ACTIVE */
LRU_ACTIVE, /* " " " " " */
NR_LRU_LISTS };

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

static inline int is_active_lru(enum lru_list l)
{
return (l == LRU_ACTIVE);
}

struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
@@ -251,10 +265,10 @@ struct zone {

/* Fields commonly accessed by the page reclaim scanner */
spinlock_t lru_lock;
struct list_head active_list;
struct list_head inactive_list;
unsigned long nr_scan_active;
unsigned long nr_scan_inactive;
struct {
struct list_head list;
unsigned long nr_scan;
} lru[NR_LRU_LISTS];
unsigned long pages_scanned; /* since last reclaim */
unsigned long flags; /* zone flags, see below */

mm/memcontrol.c (43 additions, 72 deletions)
@@ -32,6 +32,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>

#include <asm/uaccess.h>

@@ -85,22 +86,13 @@ static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
/*
* per-zone information in memory controller.
*/

enum mem_cgroup_zstat_index {
MEM_CGROUP_ZSTAT_ACTIVE,
MEM_CGROUP_ZSTAT_INACTIVE,

NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
/*
* spin_lock to protect the per cgroup LRU
*/
spinlock_t lru_lock;
struct list_head active_list;
struct list_head inactive_list;
unsigned long count[NR_MEM_CGROUP_ZSTAT];
struct list_head lists[NR_LRU_LISTS];
unsigned long count[NR_LRU_LISTS];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -227,7 +219,7 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
enum mem_cgroup_zstat_index idx)
enum lru_list idx)
{
int nid, zid;
struct mem_cgroup_per_zone *mz;
@@ -297,11 +289,9 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
struct page_cgroup *pc)
{
int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
int lru = !!from;

if (from)
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
else
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
MEM_CGROUP_ZSTAT(mz, lru) -= 1;

mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
list_del(&pc->lru);
@@ -310,37 +300,35 @@ static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
struct page_cgroup *pc)
{
int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
int lru = LRU_INACTIVE;

if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
lru += LRU_ACTIVE;

MEM_CGROUP_ZSTAT(mz, lru) += 1;
list_add(&pc->lru, &mz->lists[lru]);

if (!to) {
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
list_add(&pc->lru, &mz->inactive_list);
} else {
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
list_add(&pc->lru, &mz->active_list);
}
mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
int lru = LRU_INACTIVE;

if (from)
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
else
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
if (pc->flags & PAGE_CGROUP_FLAG_ACTIVE)
lru += LRU_ACTIVE;

if (active) {
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
MEM_CGROUP_ZSTAT(mz, lru) -= 1;

if (active)
pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
list_move(&pc->lru, &mz->active_list);
} else {
MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
else
pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
list_move(&pc->lru, &mz->inactive_list);
}

lru = !!active;
MEM_CGROUP_ZSTAT(mz, lru) += 1;
list_move(&pc->lru, &mz->lists[lru]);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -412,8 +400,8 @@ long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
unsigned long active, inactive;
/* active and inactive are the number of pages. 'long' is ok.*/
active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
active = mem_cgroup_get_all_zonestat(mem, LRU_ACTIVE);
inactive = mem_cgroup_get_all_zonestat(mem, LRU_INACTIVE);
return (long) (active / (inactive + 1));
}

@@ -444,28 +432,17 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
* (see include/linux/mmzone.h)
*/

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
struct zone *zone, int priority)
long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
int priority, enum lru_list lru)
{
long nr_active;
long nr_pages;
int nid = zone->zone_pgdat->node_id;
int zid = zone_idx(zone);
struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
struct zone *zone, int priority)
{
long nr_inactive;
int nid = zone->zone_pgdat->node_id;
int zid = zone_idx(zone);
struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
nr_pages = MEM_CGROUP_ZSTAT(mz, lru);

nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
return (nr_inactive >> priority);
return (nr_pages >> priority);
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -484,14 +461,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
int nid = z->zone_pgdat->node_id;
int zid = zone_idx(z);
struct mem_cgroup_per_zone *mz;
int lru = !!active;

BUG_ON(!mem_cont);
mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
if (active)
src = &mz->active_list;
else
src = &mz->inactive_list;

src = &mz->lists[lru];

spin_lock(&mz->lru_lock);
scan = 0;
@@ -863,18 +837,15 @@ int mem_cgroup_resize_limit(struct mem_cgroup *memcg, unsigned long long val)
#define FORCE_UNCHARGE_BATCH (128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz,
int active)
enum lru_list lru)
{
struct page_cgroup *pc;
struct page *page;
int count = FORCE_UNCHARGE_BATCH;
unsigned long flags;
struct list_head *list;

if (active)
list = &mz->active_list;
else
list = &mz->inactive_list;
list = &mz->lists[lru];

spin_lock_irqsave(&mz->lru_lock, flags);
while (!list_empty(list)) {
@@ -922,11 +893,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
for_each_node_state(node, N_POSSIBLE)
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
struct mem_cgroup_per_zone *mz;
enum lru_list l;
mz = mem_cgroup_zoneinfo(mem, node, zid);
/* drop all page_cgroup in active_list */
mem_cgroup_force_empty_list(mem, mz, 1);
/* drop all page_cgroup in inactive_list */
mem_cgroup_force_empty_list(mem, mz, 0);
for_each_lru(l)
mem_cgroup_force_empty_list(mem, mz, l);
}
}
ret = 0;
@@ -1015,9 +985,9 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
unsigned long active, inactive;

inactive = mem_cgroup_get_all_zonestat(mem_cont,
MEM_CGROUP_ZSTAT_INACTIVE);
LRU_INACTIVE);
active = mem_cgroup_get_all_zonestat(mem_cont,
MEM_CGROUP_ZSTAT_ACTIVE);
LRU_ACTIVE);
cb->fill(cb, "active", (active) * PAGE_SIZE);
cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
}
@@ -1062,6 +1032,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz;
enum lru_list l;
int zone, tmp = node;
/*
* This routine is called against possible nodes.
@@ -1082,9 +1053,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)

for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
INIT_LIST_HEAD(&mz->active_list);
INIT_LIST_HEAD(&mz->inactive_list);
spin_lock_init(&mz->lru_lock);
for_each_lru(l)
INIT_LIST_HEAD(&mz->lists[l]);
}
return 0;
}