Merge tag 'writeback-proportions' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux

Pull writeback updates from Wu Fengguang:
 "Use time based periods to age the writeback proportions, which can
  adapt equally well to fast/slow devices."

Fix up trivial conflict in comment in fs/sync.c

* tag 'writeback-proportions' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/linux:
  writeback: Fix some comment errors
  block: Convert BDI proportion calculations to flexible proportions
  lib: Fix possible deadlock in flexible proportion code
  lib: Proportions with flexible period
Author: Linus Torvalds
Date:   2012-07-30 22:14:04 -07:00
Commit: 2e3ee61348
8 changed files with 448 additions and 50 deletions
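
Before the per-file diffs, a quick illustration of the idea. The new lib/flex_proportions.c below tracks each event type j as a fraction p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1}), where x_{i,j} counts type-j events in the i-th last period, so every new period halves the weight of all history. A rough standalone C sketch of that arithmetic (userspace only, names invented for illustration; the kernel does the halving lazily and per-CPU):

/* Standalone sketch of the decaying-proportions arithmetic.
 * Plain integers stand in for the kernel's percpu counters. */
#include <stdio.h>

#define NTYPES 2

static unsigned long num[NTYPES];	/* n_j: per-type numerators */
static unsigned long den;		/* d: shared denominator */

static void count_event(int j)
{
	num[j]++;
	den++;
}

/* Declare one new period: halve all history. The kernel ages each
 * type lazily on next use; this sketch just loops over the types. */
static void age_period(void)
{
	int j;

	for (j = 0; j < NTYPES; j++)
		num[j] >>= 1;
	den >>= 1;
}

int main(void)
{
	int i;

	for (i = 0; i < 800; i++) count_event(0);	/* period 0 */
	for (i = 0; i < 200; i++) count_event(1);
	age_period();
	for (i = 0; i < 200; i++) count_event(0);	/* period 1 */
	for (i = 0; i < 800; i++) count_event(1);

	for (i = 0; i < NTYPES; i++)
		printf("type %d: %lu/%lu = %.2f\n", i, num[i], den,
		       (double)num[i] / den);
	return 0;
}

With 800/200 events in the first period and 200/800 in the second, the sketch prints roughly 0.40/0.60: the old load still counts, but at half weight. Because a period is now a fixed slice of wall-clock time (3 seconds in this series) rather than a fixed number of completions, slow and fast devices age at the same rate, which is the point of the pull message.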

--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -628,8 +628,8 @@ static long writeback_sb_inodes(struct super_block *sb,
 		}
 
 		/*
-		 * Don't bother with new inodes or inodes beeing freed, first
-		 * kind does not need peridic writeout yet, and for the latter
+		 * Don't bother with new inodes or inodes being freed, first
+		 * kind does not need periodic writeout yet, and for the latter
 		 * kind writeout is handled by the freer.
 		 */
 		spin_lock(&inode->i_lock);

--- a/fs/super.c
+++ b/fs/super.c
@@ -320,7 +320,7 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 
 /*
  *	grab_super_passive - acquire a passive reference
- *	@s: reference we are trying to grab
+ *	@sb: reference we are trying to grab
  *
  *	Tries to acquire a passive reference. This is used in places where we
  *	cannot take an active reference but we need to ensure that the

--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -10,7 +10,7 @@
 
 #include <linux/percpu_counter.h>
 #include <linux/log2.h>
-#include <linux/proportions.h>
+#include <linux/flex_proportions.h>
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/sched.h>
@@ -89,7 +89,7 @@ struct backing_dev_info {
 	unsigned long dirty_ratelimit;
 	unsigned long balanced_dirty_ratelimit;
 
-	struct prop_local_percpu completions;
+	struct fprop_local_percpu completions;
 	int dirty_exceeded;
 
 	unsigned int min_ratio;

--- /dev/null
+++ b/include/linux/flex_proportions.h
@@ -0,0 +1,101 @@
/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */
#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

/*
 * When a maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period of
 *   ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)

/*
 * ---- Global proportion definitions ----
 */
struct fprop_global {
	/* Number of events in the current period */
	struct percpu_counter events;
	/* Current period */
	unsigned int period;
	/* Synchronization with period transitions */
	seqcount_t sequence;
};

int fprop_global_init(struct fprop_global *p);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);

/*
 * ---- SINGLE ----
 */
struct fprop_local_single {
	/* the local events counter */
	unsigned long events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

#define INIT_FPROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
	struct fprop_local_single *pl, unsigned long *numerator,
	unsigned long *denominator);

static inline
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_single(p, pl);
	local_irq_restore(flags);
}

/*
 * ---- PERCPU ----
 */
struct fprop_local_percpu {
	/* the local events counter */
	struct percpu_counter events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};

int fprop_local_init_percpu(struct fprop_local_percpu *pl);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
			    int max_frac);
void fprop_fraction_percpu(struct fprop_global *p,
	struct fprop_local_percpu *pl, unsigned long *numerator,
	unsigned long *denominator);

static inline
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_percpu(p, pl);
	local_irq_restore(flags);
}

#endif
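
For orientation, a hypothetical user of the percpu flavour declared above would be wired up roughly like this. This is a sketch under invented names (my_fprop, my_type, the my_* functions, and the pr_info reporting are not part of the patch); some caller must also drive fprop_new_period() periodically, as the timer in mm/page-writeback.c does further below:

/* Hypothetical consumer of the percpu API; names are invented. */
static struct fprop_global my_fprop;
static struct fprop_local_percpu my_type;

static int my_setup(void)
{
	int err;

	err = fprop_global_init(&my_fprop);
	if (err)
		return err;
	err = fprop_local_init_percpu(&my_type);
	if (err)
		fprop_global_destroy(&my_fprop);
	return err;
}

/* One event of this type happened (IRQs are disabled internally). */
static void my_event(void)
{
	fprop_inc_percpu(&my_fprop, &my_type);
}

/* Read the current fraction, then age the history by one period. */
static void my_tick(void)
{
	unsigned long num, den;

	fprop_fraction_percpu(&my_fprop, &my_type, &num, &den);
	pr_info("fraction %lu/%lu\n", num, den);
	fprop_new_period(&my_fprop, 1);
}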

--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
-	 proportions.o prio_heap.o ratelimit.o show_mem.o \
+	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o

--- /dev/null
+++ b/lib/flex_proportions.c
@@ -0,0 +1,272 @@
/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * Where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and for each event type its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iteration over all event types, we instead shift the numerator of
 * event j lazily when someone asks for a proportion of event j or when event
 * j occurs. This can be trivially implemented by remembering the last period
 * in which something happened with proportions of type j.
 */
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	u64 events;
	unsigned long flags;

	local_irq_save(flags);
	events = percpu_counter_sum(&p->events);
	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1) {
		local_irq_restore(flags);
		return false;
	}
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	local_irq_restore(flags);

	return true;
}

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl)
{
	int err;

	err = percpu_counter_init(&pl->events, 0);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		__percpu_counter_add(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	fprop_reflect_period_percpu(p, pl);
	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * Like __fprop_inc_percpu() except that event is counted only if the given
 * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_inc_percpu_max(struct fprop_global *p,
			    struct fprop_local_percpu *pl, int max_frac)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		if (numerator >
		    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
			return;
	} else
		fprop_reflect_period_percpu(p, pl);
	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}
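
One subtlety in fprop_new_period() above deserves a second look: it does not set the global counter to events >> periods directly, it subtracts the aged-away difference, so increments that race in between percpu_counter_sum() and the update are not lost. A standalone check of that arithmetic (not kernel code):

/* Verifies the "use addition to avoid losing events" arithmetic. */
#include <assert.h>

int main(void)
{
	unsigned long long counter = 1000;	/* stands in for p->events */
	unsigned long long events = counter;	/* percpu_counter_sum() */
	int periods = 2;

	counter += 7;			/* racing increments land meanwhile */
	events -= events >> periods;	/* amount the aging removes */
	counter -= events;		/* percpu_counter_add(&p->events, -events) */

	/* The aged value and the 7 racing events both survive. */
	assert(counter == (1000ull >> 2) + 7);
	return 0;
}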

--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -677,7 +677,7 @@ int bdi_init(struct backing_dev_info *bdi)
 
 	bdi->min_ratio = 0;
 	bdi->max_ratio = 100;
-	bdi->max_prop_frac = PROP_FRAC_BASE;
+	bdi->max_prop_frac = FPROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->work_list);
@@ -700,7 +700,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->write_bandwidth = INIT_BW;
 	bdi->avg_write_bandwidth = INIT_BW;
 
-	err = prop_local_init_percpu(&bdi->completions);
+	err = fprop_local_init_percpu(&bdi->completions);
 
 	if (err) {
 err:
@@ -744,7 +744,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
 		percpu_counter_destroy(&bdi->bdi_stat[i]);
 
-	prop_local_destroy_percpu(&bdi->completions);
+	fprop_local_destroy_percpu(&bdi->completions);
 }
 EXPORT_SYMBOL(bdi_destroy);
 

--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/buffer_head.h> /* __set_page_dirty_buffers */
 #include <linux/pagevec.h>
+#include <linux/timer.h>
 #include <trace/events/writeback.h>
 
 /*
@@ -135,7 +136,20 @@ unsigned long global_dirty_limit;
  * measured in page writeback completions.
  *
  */
-static struct prop_descriptor vm_completions;
+static struct fprop_global writeout_completions;
+
+static void writeout_period(unsigned long t);
+/* Timer for aging of writeout_completions */
+static struct timer_list writeout_period_timer =
+		TIMER_DEFERRED_INITIALIZER(writeout_period, 0, 0);
+static unsigned long writeout_period_time = 0;
+
+/*
+ * Length of period for aging writeout fractions of bdis. This is an
+ * arbitrarily chosen number. The longer the period, the slower fractions will
+ * reflect changes in current writeout rate.
+ */
+#define VM_COMPLETIONS_PERIOD_LEN (3*HZ)
 
 /*
  * Work out the current dirty-memory clamping and background writeout
@@ -322,34 +336,6 @@ bool zone_dirty_ok(struct zone *zone)
 	       zone_page_state(zone, NR_WRITEBACK) <= limit;
 }
 
-/*
- * couple the period to the dirty_ratio:
- *
- *   period/2 ~ roundup_pow_of_two(dirty limit)
- */
-static int calc_period_shift(void)
-{
-	unsigned long dirty_total;
-
-	if (vm_dirty_bytes)
-		dirty_total = vm_dirty_bytes / PAGE_SIZE;
-	else
-		dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
-				100;
-	return 2 + ilog2(dirty_total - 1);
-}
-
-/*
- * update the period when the dirty threshold changes.
- */
-static void update_completion_period(void)
-{
-	int shift = calc_period_shift();
-	prop_change_shift(&vm_completions, shift);
-
-	writeback_set_ratelimit();
-}
-
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
@@ -383,7 +369,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
-		update_completion_period();
+		writeback_set_ratelimit();
 		vm_dirty_bytes = 0;
 	}
 	return ret;
@@ -398,12 +384,21 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
-		update_completion_period();
+		writeback_set_ratelimit();
 		vm_dirty_ratio = 0;
 	}
 	return ret;
 }
 
+static unsigned long wp_next_time(unsigned long cur_time)
+{
+	cur_time += VM_COMPLETIONS_PERIOD_LEN;
+	/* 0 has a special meaning... */
+	if (!cur_time)
+		return 1;
+	return cur_time;
+}
+
 /*
  * Increment the BDI's writeout completion count and the global writeout
  * completion count. Called from test_clear_page_writeback().
@@ -411,8 +406,19 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
 static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 {
 	__inc_bdi_stat(bdi, BDI_WRITTEN);
-	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
-			      bdi->max_prop_frac);
+	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
+			       bdi->max_prop_frac);
+	/* First event after period switching was turned off? */
+	if (!unlikely(writeout_period_time)) {
+		/*
+		 * We can race with other __bdi_writeout_inc calls here but
+		 * it does not cause any harm since the resulting time when
+		 * timer will fire and what is in writeout_period_time will be
+		 * roughly the same.
+		 */
+		writeout_period_time = wp_next_time(jiffies);
+		mod_timer(&writeout_period_timer, writeout_period_time);
+	}
 }
 
 void bdi_writeout_inc(struct backing_dev_info *bdi)
@@ -431,10 +437,32 @@ EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 				  long *numerator, long *denominator)
 {
-	prop_fraction_percpu(&vm_completions, &bdi->completions,
+	fprop_fraction_percpu(&writeout_completions, &bdi->completions,
 				numerator, denominator);
 }
 
+/*
+ * On idle system, we can be called long after we scheduled because we use
+ * deferred timers so count with missed periods.
+ */
+static void writeout_period(unsigned long t)
+{
+	int miss_periods = (jiffies - writeout_period_time) /
+				 VM_COMPLETIONS_PERIOD_LEN;
+
+	if (fprop_new_period(&writeout_completions, miss_periods + 1)) {
+		writeout_period_time = wp_next_time(writeout_period_time +
+				miss_periods * VM_COMPLETIONS_PERIOD_LEN);
+		mod_timer(&writeout_period_timer, writeout_period_time);
+	} else {
+		/*
+		 * Aging has zeroed all fractions. Stop wasting CPU on period
+		 * updates.
+		 */
+		writeout_period_time = 0;
+	}
+}
+
 /*
  * bdi_min_ratio keeps the sum of the minimum dirty shares of all
  * registered backing devices, which, for obvious reasons, can not
@@ -475,7 +503,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
-		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
+		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
 	}
 	spin_unlock_bh(&bdi_lock);
 
@@ -918,7 +946,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
 	 *	bdi->dirty_ratelimit = balanced_dirty_ratelimit;
 	 *
 	 * However to get a more stable dirty_ratelimit, the below elaborated
-	 * code makes use of task_ratelimit to filter out sigular points and
+	 * code makes use of task_ratelimit to filter out singular points and
	 * limit the step size.
 	 *
 	 * The below code essentially only uses the relative value of
@@ -941,7 +969,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
 	 * feel and care are stable dirty rate and small position error.
 	 *
 	 * |task_ratelimit - dirty_ratelimit| is used to limit the step size
-	 * and filter out the sigular points of balanced_dirty_ratelimit. Which
+	 * and filter out the singular points of balanced_dirty_ratelimit. Which
 	 * keeps jumping around randomly and can even leap far away at times
 	 * due to the small 200ms estimation period of dirty_rate (we want to
 	 * keep that period small to reduce time lags).
@@ -1606,13 +1634,10 @@ static struct notifier_block __cpuinitdata ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
-	int shift;
-
 	writeback_set_ratelimit();
 	register_cpu_notifier(&ratelimit_nb);
 
-	shift = calc_period_shift();
-	prop_descriptor_init(&vm_completions, shift);
+	fprop_global_init(&writeout_completions);
 }
 
 /**
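
Since writeout_period_timer is a deferred timer, writeout_period() can fire long after the nominal 3-second boundary on an idle system; the miss_periods arithmetic above then declares all skipped periods at once and re-arms the timer on the original grid. A standalone sketch of that catch-up logic (HZ and jiffies are mocked here; not kernel code):

/* Mock of the missed-period catch-up in writeout_period(). */
#include <stdio.h>

#define HZ 1000
#define VM_COMPLETIONS_PERIOD_LEN (3 * HZ)

int main(void)
{
	unsigned long writeout_period_time = 12000; /* timer was due here */
	unsigned long jiffies = 23500;	/* deferred timer fired this late */

	/* 11500 jiffies late: three whole extra periods were skipped */
	int miss_periods = (jiffies - writeout_period_time) /
				VM_COMPLETIONS_PERIOD_LEN;

	/* Age miss_periods + 1 periods in one go and re-arm the timer on
	 * the original 3 s grid rather than relative to "now". */
	printf("declare %d periods\n", miss_periods + 1);
	printf("next timer at %lu\n", writeout_period_time +
	       (unsigned long)(miss_periods + 1) * VM_COMPLETIONS_PERIOD_LEN);
	return 0;
}

The run prints "declare 4 periods" and "next timer at 24000", matching what fprop_new_period() and wp_next_time() would do for the same inputs.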