writeback: avoid unnecessary calculation of bdi dirty thresholds
Split get_dirty_limits() into global_dirty_limits() + bdi_dirty_limit(), so
that the latter can be avoided when the system is under the global dirty
background threshold (which is the normal state for most systems).

Signed-off-by: Wu Fengguang <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Dave Chinner <[email protected]>
Cc: Jens Axboe <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Wu Fengguang authored and torvalds committed Aug 12, 2010
1 parent e50e372 commit 16c4042
Showing 4 changed files with 44 additions and 41 deletions.
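
Before the per-file diffs, a quick illustration of the call pattern this split enables. This is a minimal user-space sketch, not the kernel code: the helper bodies, page counts, and numeric thresholds below are invented stand-ins, and only the function names and the fast-path check mirror the patch (see the balance_dirty_pages() hunk further down).

/*
 * Sketch only: models how a caller computes the cheap global limits
 * first and derives the per-BDI limit only when throttling is needed.
 * All values and helper bodies are made up for illustration.
 */
#include <stdio.h>

/* stand-in for global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS) */
static unsigned long nr_reclaimable(void) { return 800; }

/* stand-in for global_page_state(NR_WRITEBACK) */
static unsigned long nr_writeback(void) { return 200; }

/* cheap: depends only on global tunables (models global_dirty_limits()) */
static void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	*pbackground = 1000;	/* e.g. dirty_background_ratio applied to dirtyable memory */
	*pdirty = 2000;		/* e.g. vm_dirty_ratio applied to dirtyable memory */
}

/* costlier: this BDI's share of the dirty limit (models bdi_dirty_limit()) */
static unsigned long bdi_dirty_limit(unsigned long dirty)
{
	return dirty / 4;	/* pretend this BDI currently gets a 25% share */
}

int main(void)
{
	unsigned long background_thresh, dirty_thresh, bdi_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	/*
	 * Fast path: below the midpoint of the background and dirty
	 * thresholds nothing is throttled, so the per-BDI calculation
	 * is skipped entirely; that is the point of the split.
	 */
	if (nr_reclaimable() + nr_writeback() <
	    (background_thresh + dirty_thresh) / 2) {
		puts("under background threshold: bdi limit not computed");
		return 0;
	}

	bdi_thresh = bdi_dirty_limit(dirty_thresh);
	printf("over threshold: throttle against bdi_thresh=%lu\n", bdi_thresh);
	return 0;
}

With the sample numbers the fast path is taken, which matches the commit's claim that most systems sit under the background threshold most of the time; only the slow path pays for the per-BDI writeout-fraction arithmetic.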
2 changes: 1 addition & 1 deletion fs/fs-writeback.c
@@ -590,7 +590,7 @@ static inline bool over_bground_thresh(void)
 {
 	unsigned long background_thresh, dirty_thresh;
 
-	get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+	global_dirty_limits(&background_thresh, &dirty_thresh);
 
 	return (global_page_state(NR_FILE_DIRTY) +
 		global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
5 changes: 3 additions & 2 deletions include/linux/writeback.h
@@ -124,8 +124,9 @@ struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
 				      void __user *, size_t *, loff_t *);
 
-void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-		      unsigned long *pbdi_dirty, struct backing_dev_info *bdi);
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+			       unsigned long dirty);
 
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
3 changes: 2 additions & 1 deletion mm/backing-dev.c
@@ -81,7 +81,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		nr_more_io++;
 	spin_unlock(&inode_lock);
 
-	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	seq_printf(m,
75 changes: 38 additions & 37 deletions mm/page-writeback.c
@@ -267,21 +267,20 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  * dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+				       unsigned long bdi_dirty)
 {
 	long numerator, denominator;
-	unsigned long dirty = *pdirty;
+	unsigned long dirty = bdi_dirty;
 	u64 inv = dirty >> 3;
 
 	task_dirties_fraction(tsk, &numerator, &denominator);
 	inv *= numerator;
 	do_div(inv, denominator);
 
 	dirty -= inv;
-	if (dirty < *pdirty/2)
-		dirty = *pdirty/2;
 
-	*pdirty = dirty;
+	return max(dirty, bdi_dirty/2);
 }
 
 /*
@@ -391,9 +390,7 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-		 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
@@ -425,26 +422,28 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
 	}
 	*pbackground = background;
 	*pdirty = dirty;
+}
 
-	if (bdi) {
-		u64 bdi_dirty;
-		long numerator, denominator;
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+			       unsigned long dirty)
+{
+	u64 bdi_dirty;
+	long numerator, denominator;
 
-		/*
-		 * Calculate this BDI's share of the dirty ratio.
-		 */
-		bdi_writeout_fraction(bdi, &numerator, &denominator);
+	/*
+	 * Calculate this BDI's share of the dirty ratio.
+	 */
+	bdi_writeout_fraction(bdi, &numerator, &denominator);
 
-		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-		bdi_dirty *= numerator;
-		do_div(bdi_dirty, denominator);
-		bdi_dirty += (dirty * bdi->min_ratio) / 100;
-		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-			bdi_dirty = dirty * bdi->max_ratio / 100;
+	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+	bdi_dirty *= numerator;
+	do_div(bdi_dirty, denominator);
 
-		*pbdi_dirty = bdi_dirty;
-		task_dirty_limit(current, pbdi_dirty);
-	}
+	bdi_dirty += (dirty * bdi->min_ratio) / 100;
+	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+		bdi_dirty = dirty * bdi->max_ratio / 100;
+
+	return bdi_dirty;
 }
 
 /*
@@ -475,13 +474,24 @@ static void balance_dirty_pages(struct address_space *mapping,
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&background_thresh, &dirty_thresh,
-				&bdi_thresh, bdi);
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
 
+		global_dirty_limits(&background_thresh, &dirty_thresh);
+
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
+		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+		bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
 		/*
 		 * In order to avoid the stacked BDI deadlock we need
 		 * to ensure we accurately count the 'dirty' pages when
@@ -513,15 +523,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 		if (!dirty_exceeded)
 			break;
 
-		/*
-		 * Throttle it only when the background writeback cannot
-		 * catch-up. This avoids (excessively) small writeouts
-		 * when the bdi limits are ramping up.
-		 */
-		if (nr_reclaimable + nr_writeback <
-				(background_thresh + dirty_thresh) / 2)
-			break;
-
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
 
@@ -634,7 +635,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 	unsigned long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+		global_dirty_limits(&background_thresh, &dirty_thresh);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
