mirror of https://github.com/joel16/android_kernel_sony_msm8994.git
writeback: avoid unnecessary calculation of bdi dirty thresholds
Split get_dirty_limits() into global_dirty_limits() + bdi_dirty_limit(), so that the latter can be avoided when under the global dirty background threshold (which is the normal state for most systems).

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent e50e37201a
commit 16c4042f08
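Why the split pays off is easiest to see in the balance_dirty_pages() hunks below: the cheap global check now happens first, and the per-bdi/per-task math runs only when the system is actually near its limits. A condensed sketch of the new calling pattern (paraphrased from the hunks below, not a literal excerpt; the surrounding retry loop and locking are omitted):

        unsigned long background_thresh, dirty_thresh, bdi_thresh;

        global_dirty_limits(&background_thresh, &dirty_thresh);

        /* Below the averaged background/dirty threshold (the common
         * case): nothing to throttle, and bdi_dirty_limit() is never
         * evaluated. */
        if (nr_reclaimable + nr_writeback <
                        (background_thresh + dirty_thresh) / 2)
                break;

        /* Only now pay for the per-bdi and per-task threshold math. */
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
        bdi_thresh = task_dirty_limit(current, bdi_thresh);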
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -590,7 +590,7 @@ static inline bool over_bground_thresh(void)
 {
         unsigned long background_thresh, dirty_thresh;
 
-        get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+        global_dirty_limits(&background_thresh, &dirty_thresh);
 
         return (global_page_state(NR_FILE_DIRTY) +
                 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -124,8 +124,9 @@ struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
                                       void __user *, size_t *, loff_t *);
 
-void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-                      unsigned long *pbdi_dirty, struct backing_dev_info *bdi);
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+                              unsigned long dirty);
 
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -81,7 +81,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
                 nr_more_io++;
         spin_unlock(&inode_lock);
 
-        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+        global_dirty_limits(&background_thresh, &dirty_thresh);
+        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
         seq_printf(m,
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -267,10 +267,11 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  *   dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+                                      unsigned long bdi_dirty)
 {
         long numerator, denominator;
-        unsigned long dirty = *pdirty;
+        unsigned long dirty = bdi_dirty;
         u64 inv = dirty >> 3;
 
         task_dirties_fraction(tsk, &numerator, &denominator);
@@ -278,10 +279,8 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
         do_div(inv, denominator);
 
         dirty -= inv;
-        if (dirty < *pdirty/2)
-                dirty = *pdirty/2;
 
-        *pdirty = dirty;
+        return max(dirty, bdi_dirty/2);
 }
 
 /*
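As a worked example of the dirty -= (dirty/8) * p_{t} formula above (numbers are hypothetical): with bdi_dirty = 800 pages, inv starts at 800 >> 3 = 100; if task_dirties_fraction() reports that this task did half of the recent dirtying (p_{t} = 1/2), inv becomes 100 * 1/2 = 50 and the returned limit is 800 - 50 = 750 pages. The max(dirty, bdi_dirty/2) clamp is a safety floor at half the bdi limit; since inv can never exceed dirty/8, the subtraction alone only reaches 7/8 of bdi_dirty, so the floor is defensive.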
@@ -391,9 +390,7 @@ unsigned long determine_dirtyable_memory(void)
         return x + 1;   /* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-                 unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
         unsigned long background;
         unsigned long dirty;
@@ -425,26 +422,28 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
         }
         *pbackground = background;
         *pdirty = dirty;
+}
 
-        if (bdi) {
-                u64 bdi_dirty;
-                long numerator, denominator;
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+                              unsigned long dirty)
+{
+        u64 bdi_dirty;
+        long numerator, denominator;
 
-                /*
-                 * Calculate this BDI's share of the dirty ratio.
-                 */
-                bdi_writeout_fraction(bdi, &numerator, &denominator);
+        /*
+         * Calculate this BDI's share of the dirty ratio.
+         */
+        bdi_writeout_fraction(bdi, &numerator, &denominator);
 
-                bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-                bdi_dirty *= numerator;
-                do_div(bdi_dirty, denominator);
-                bdi_dirty += (dirty * bdi->min_ratio) / 100;
-                if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-                        bdi_dirty = dirty * bdi->max_ratio / 100;
+        bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+        bdi_dirty *= numerator;
+        do_div(bdi_dirty, denominator);
 
-                *pbdi_dirty = bdi_dirty;
-                task_dirty_limit(current, pbdi_dirty);
-        }
+        bdi_dirty += (dirty * bdi->min_ratio) / 100;
+        if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+                bdi_dirty = dirty * bdi->max_ratio / 100;
+
+        return bdi_dirty;
 }
 
 /*
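To illustrate the share computation with hypothetical numbers: take dirty = 1000 pages, a global bdi_min_ratio of 0, and a bdi whose recent writeout fraction from bdi_writeout_fraction() is 3/10 (numerator = 3, denominator = 10), with min_ratio = 0 and max_ratio = 100. Then bdi_dirty = 1000 * (100 - 0) / 100 = 1000, scaled to 1000 * 3 / 10 = 300; the min_ratio term adds nothing and the max_ratio cap (1000) does not bind, so this bdi may hold 300 of the 1000-page global dirty limit. The writeout fraction adapts as the bdi completes writeback, which is why the comment in balance_dirty_pages() speaks of the bdi limits "ramping up".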
@@ -475,13 +474,24 @@ static void balance_dirty_pages(struct address_space *mapping,
                         .range_cyclic   = 1,
                 };
 
-                get_dirty_limits(&background_thresh, &dirty_thresh,
-                                &bdi_thresh, bdi);
-
                 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                         global_page_state(NR_UNSTABLE_NFS);
                 nr_writeback = global_page_state(NR_WRITEBACK);
 
+                global_dirty_limits(&background_thresh, &dirty_thresh);
+
+                /*
+                 * Throttle it only when the background writeback cannot
+                 * catch-up. This avoids (excessively) small writeouts
+                 * when the bdi limits are ramping up.
+                 */
+                if (nr_reclaimable + nr_writeback <
+                                (background_thresh + dirty_thresh) / 2)
+                        break;
+
+                bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+                bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
                 /*
                  * In order to avoid the stacked BDI deadlock we need
                  * to ensure we accurately count the 'dirty' pages when
@@ -513,15 +523,6 @@ static void balance_dirty_pages(struct address_space *mapping,
                 if (!dirty_exceeded)
                         break;
 
-                /*
-                 * Throttle it only when the background writeback cannot
-                 * catch-up. This avoids (excessively) small writeouts
-                 * when the bdi limits are ramping up.
-                 */
-                if (nr_reclaimable + nr_writeback <
-                                (background_thresh + dirty_thresh) / 2)
-                        break;
-
                 if (!bdi->dirty_exceeded)
                         bdi->dirty_exceeded = 1;
 
@@ -634,7 +635,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
         unsigned long dirty_thresh;
 
         for ( ; ; ) {
-                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+                global_dirty_limits(&background_thresh, &dirty_thresh);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page