mm/page-writeback.c: make determine_dirtyable_memory static again
The tracing ring-buffer used this function briefly, but not anymore.
Make it local to the writeback code again.

Also, move the function so that no forward declaration needs to be
reintroduced.
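
For context, the forward-declaration point is plain C: a function with
internal linkage only has to be declared before its first use, so defining
it above its callers removes the need for a separate declaration. A minimal
sketch (hypothetical code, not from the kernel):

/* helper() is defined before caller(), so no forward declaration of
 * helper() is needed anywhere in the file. */
static long helper(long x)
{
	return x + 1;
}

long caller(long x)
{
	return helper(x);	/* helper is already in scope here */
}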

Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Reviewed-by: Michal Hocko <[email protected]>
Cc: Wu Fengguang <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
hnaz authored and torvalds committed Jan 11, 2012
1 parent e4e1118 commit 1edf223
Showing 2 changed files with 60 additions and 64 deletions.
2 changes: 0 additions & 2 deletions include/linux/writeback.h
@@ -138,8 +138,6 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern unsigned long determine_dirtyable_memory(void);
-
 extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
122 changes: 60 additions & 62 deletions mm/page-writeback.c
@@ -129,6 +129,66 @@ unsigned long global_dirty_limit;
  */
 static struct prop_descriptor vm_completions;
 
+/*
+ * Work out the current dirty-memory clamping and background writeout
+ * thresholds.
+ *
+ * The main aim here is to lower them aggressively if there is a lot of mapped
+ * memory around. To avoid stressing page reclaim with lots of unreclaimable
+ * pages. It is better to clamp down on writers than to start swapping, and
+ * performing lots of scanning.
+ *
+ * We only allow 1/2 of the currently-unmapped memory to be dirtied.
+ *
+ * We don't permit the clamping level to fall below 5% - that is getting rather
+ * excessive.
+ *
+ * We make sure that the background writeout level is below the adjusted
+ * clamping level.
+ */
+static unsigned long highmem_dirtyable_memory(unsigned long total)
+{
+#ifdef CONFIG_HIGHMEM
+	int node;
+	unsigned long x = 0;
+
+	for_each_node_state(node, N_HIGH_MEMORY) {
+		struct zone *z =
+			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
+	}
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+static unsigned long determine_dirtyable_memory(void)
+{
+	unsigned long x;
+
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+
+	if (!vm_highmem_is_dirtyable)
+		x -= highmem_dirtyable_memory(x);
+
+	return x + 1;	/* Ensure that we never return 0 */
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -196,7 +256,6 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-
 int dirty_bytes_handler(struct ctl_table *table, int write,
 			void __user *buffer, size_t *lenp,
 			loff_t *ppos)
@@ -291,67 +350,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 }
 EXPORT_SYMBOL(bdi_set_max_ratio);
 
-/*
- * Work out the current dirty-memory clamping and background writeout
- * thresholds.
- *
- * The main aim here is to lower them aggressively if there is a lot of mapped
- * memory around. To avoid stressing page reclaim with lots of unreclaimable
- * pages. It is better to clamp down on writers than to start swapping, and
- * performing lots of scanning.
- *
- * We only allow 1/2 of the currently-unmapped memory to be dirtied.
- *
- * We don't permit the clamping level to fall below 5% - that is getting rather
- * excessive.
- *
- * We make sure that the background writeout level is below the adjusted
- * clamping level.
- */
-
-static unsigned long highmem_dirtyable_memory(unsigned long total)
-{
-#ifdef CONFIG_HIGHMEM
-	int node;
-	unsigned long x = 0;
-
-	for_each_node_state(node, N_HIGH_MEMORY) {
-		struct zone *z =
-			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
-
-		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
-	}
-	/*
-	 * Make sure that the number of highmem pages is never larger
-	 * than the number of the total dirtyable memory. This can only
-	 * occur in very strange VM situations but we want to make sure
-	 * that this does not occur.
-	 */
-	return min(x, total);
-#else
-	return 0;
-#endif
-}
-
-/**
- * determine_dirtyable_memory - amount of memory that may be used
- *
- * Returns the number of pages that can currently be freed and used
- * by the kernel for direct mappings.
- */
-unsigned long determine_dirtyable_memory(void)
-{
-	unsigned long x;
-
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
-
-	if (!vm_highmem_is_dirtyable)
-		x -= highmem_dirtyable_memory(x);
-
-	return x + 1;	/* Ensure that we never return 0 */
-}
-
 static unsigned long dirty_freerun_ceiling(unsigned long thresh,
 					   unsigned long bg_thresh)
 {
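
For reference, the count returned by determine_dirtyable_memory() feeds the
dirty thresholds computed elsewhere in mm/page-writeback.c, which is why the
function can now be static. The following is a simplified sketch of that
computation, assuming only the ratio-based sysctls (vm_dirty_ratio,
dirty_background_ratio) are in use; the function name is illustrative, and
the real code also handles the bytes-based sysctls and per-task adjustments.

/* Illustrative sketch only -- not code from this commit. */
static void sketch_dirty_limits(unsigned long *pbackground,
				unsigned long *pdirty)
{
	unsigned long available = determine_dirtyable_memory();

	/* hard clamping level and background writeout level, as percentages */
	*pdirty = (vm_dirty_ratio * available) / 100;
	*pbackground = (dirty_background_ratio * available) / 100;

	/* keep the background writeout level below the clamping level */
	if (*pbackground >= *pdirty)
		*pbackground = *pdirty / 2;
}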
