mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-20 01:24:39 +08:00
0e093d9976
If congestion_wait() is called with no BDI congested, the caller will sleep for the full timeout and this may be an unnecessary sleep. This patch adds a wait_iff_congested() that checks congestion and only sleeps if a BDI is congested; otherwise, it calls cond_resched() to ensure the caller is not hogging the CPU longer than its quota, but otherwise will not sleep. This is aimed at reducing some of the major desktop stalls reported during IO. For example, while kswapd is operating, it calls congestion_wait() but it could just have been reclaiming clean page cache pages with no congestion. Without this patch, it would sleep for a full timeout but after this patch, it'll just call schedule() if it has been on the CPU too long. Similar logic applies to direct reclaimers that are not making enough progress. Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan.kim@gmail.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Rik van Riel <riel@redhat.com> Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
193 lines
5.3 KiB
C
193 lines
5.3 KiB
C
#undef TRACE_SYSTEM
|
|
#define TRACE_SYSTEM writeback
|
|
|
|
#if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ)
|
|
#define _TRACE_WRITEBACK_H
|
|
|
|
#include <linux/backing-dev.h>
|
|
#include <linux/device.h>
|
|
#include <linux/writeback.h>
|
|
|
|
struct wb_writeback_work;
|
|
|
|
DECLARE_EVENT_CLASS(writeback_work_class,
|
|
TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work),
|
|
TP_ARGS(bdi, work),
|
|
TP_STRUCT__entry(
|
|
__array(char, name, 32)
|
|
__field(long, nr_pages)
|
|
__field(dev_t, sb_dev)
|
|
__field(int, sync_mode)
|
|
__field(int, for_kupdate)
|
|
__field(int, range_cyclic)
|
|
__field(int, for_background)
|
|
),
|
|
TP_fast_assign(
|
|
strncpy(__entry->name, dev_name(bdi->dev), 32);
|
|
__entry->nr_pages = work->nr_pages;
|
|
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
|
|
__entry->sync_mode = work->sync_mode;
|
|
__entry->for_kupdate = work->for_kupdate;
|
|
__entry->range_cyclic = work->range_cyclic;
|
|
__entry->for_background = work->for_background;
|
|
),
|
|
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
|
|
"kupdate=%d range_cyclic=%d background=%d",
|
|
__entry->name,
|
|
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
|
|
__entry->nr_pages,
|
|
__entry->sync_mode,
|
|
__entry->for_kupdate,
|
|
__entry->range_cyclic,
|
|
__entry->for_background
|
|
)
|
|
);
|
|
/*
 * Instantiate individual tracepoints that all share the
 * writeback_work_class record format and print format above.
 */
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
DEFINE_EVENT(writeback_work_class, name, \
	TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \
	TP_ARGS(bdi, work))
DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread);
DEFINE_WRITEBACK_WORK_EVENT(writeback_queue);
DEFINE_WRITEBACK_WORK_EVENT(writeback_exec);
|
|
|
|
/*
 * Fired to report how many pages a writeback pass actually wrote;
 * records only that single count.
 */
TRACE_EVENT(writeback_pages_written,
	TP_PROTO(long pages_written),
	TP_ARGS(pages_written),
	TP_STRUCT__entry(
		__field(long, pages)
	),
	TP_fast_assign(
		__entry->pages = pages_written;
	),
	TP_printk("%ld", __entry->pages)
);
|
|
|
|
DECLARE_EVENT_CLASS(writeback_class,
|
|
TP_PROTO(struct backing_dev_info *bdi),
|
|
TP_ARGS(bdi),
|
|
TP_STRUCT__entry(
|
|
__array(char, name, 32)
|
|
),
|
|
TP_fast_assign(
|
|
strncpy(__entry->name, dev_name(bdi->dev), 32);
|
|
),
|
|
TP_printk("bdi %s",
|
|
__entry->name
|
|
)
|
|
);
|
|
/*
 * Instantiate the BDI-only tracepoints sharing writeback_class's format.
 */
#define DEFINE_WRITEBACK_EVENT(name) \
DEFINE_EVENT(writeback_class, name, \
	TP_PROTO(struct backing_dev_info *bdi), \
	TP_ARGS(bdi))

DEFINE_WRITEBACK_EVENT(writeback_nowork);
DEFINE_WRITEBACK_EVENT(writeback_wake_thread);
DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread);
DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
DEFINE_WRITEBACK_EVENT(writeback_thread_start);
DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
|
|
|
|
DECLARE_EVENT_CLASS(wbc_class,
|
|
TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
|
|
TP_ARGS(wbc, bdi),
|
|
TP_STRUCT__entry(
|
|
__array(char, name, 32)
|
|
__field(long, nr_to_write)
|
|
__field(long, pages_skipped)
|
|
__field(int, sync_mode)
|
|
__field(int, for_kupdate)
|
|
__field(int, for_background)
|
|
__field(int, for_reclaim)
|
|
__field(int, range_cyclic)
|
|
__field(int, more_io)
|
|
__field(unsigned long, older_than_this)
|
|
__field(long, range_start)
|
|
__field(long, range_end)
|
|
),
|
|
|
|
TP_fast_assign(
|
|
strncpy(__entry->name, dev_name(bdi->dev), 32);
|
|
__entry->nr_to_write = wbc->nr_to_write;
|
|
__entry->pages_skipped = wbc->pages_skipped;
|
|
__entry->sync_mode = wbc->sync_mode;
|
|
__entry->for_kupdate = wbc->for_kupdate;
|
|
__entry->for_background = wbc->for_background;
|
|
__entry->for_reclaim = wbc->for_reclaim;
|
|
__entry->range_cyclic = wbc->range_cyclic;
|
|
__entry->more_io = wbc->more_io;
|
|
__entry->older_than_this = wbc->older_than_this ?
|
|
*wbc->older_than_this : 0;
|
|
__entry->range_start = (long)wbc->range_start;
|
|
__entry->range_end = (long)wbc->range_end;
|
|
),
|
|
|
|
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
|
|
"bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
|
|
"start=0x%lx end=0x%lx",
|
|
__entry->name,
|
|
__entry->nr_to_write,
|
|
__entry->pages_skipped,
|
|
__entry->sync_mode,
|
|
__entry->for_kupdate,
|
|
__entry->for_background,
|
|
__entry->for_reclaim,
|
|
__entry->range_cyclic,
|
|
__entry->more_io,
|
|
__entry->older_than_this,
|
|
__entry->range_start,
|
|
__entry->range_end)
|
|
)
|
|
|
|
/*
 * Instantiate the writeback_control snapshot tracepoints sharing
 * wbc_class's record and print format.
 */
#define DEFINE_WBC_EVENT(name) \
DEFINE_EVENT(wbc_class, name, \
	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
	TP_ARGS(wbc, bdi))
DEFINE_WBC_EVENT(wbc_writeback_start);
DEFINE_WBC_EVENT(wbc_writeback_written);
DEFINE_WBC_EVENT(wbc_writeback_wait);
DEFINE_WBC_EVENT(wbc_balance_dirty_start);
DEFINE_WBC_EVENT(wbc_balance_dirty_written);
DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
DEFINE_WBC_EVENT(wbc_writepage);
|
|
|
|
/*
 * writeback_congest_waited_template: event class for congestion-wait
 * tracepoints. Records the timeout the caller asked for and the time it
 * actually spent delayed, both in microseconds, so unnecessary sleeps
 * (delay without congestion) can be spotted from the trace.
 */
DECLARE_EVENT_CLASS(writeback_congest_waited_template,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed),

	TP_STRUCT__entry(
		__field(	unsigned int,	usec_timeout	)
		__field(	unsigned int,	usec_delayed	)
	),

	TP_fast_assign(
		__entry->usec_timeout	= usec_timeout;
		__entry->usec_delayed	= usec_delayed;
	),

	TP_printk("usec_timeout=%u usec_delayed=%u",
			__entry->usec_timeout,
			__entry->usec_delayed)
);
|
|
|
|
/* Fired from congestion_wait(): the caller always sleeps. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
|
|
|
|
/* Fired from wait_iff_congested(): sleeps only if a BDI is congested. */
DEFINE_EVENT(writeback_congest_waited_template, writeback_wait_iff_congested,

	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),

	TP_ARGS(usec_timeout, usec_delayed)
);
|
|
|
|
#endif /* _TRACE_WRITEBACK_H */
|
|
|
|
/* This part must be outside protection */
|
|
#include <trace/define_trace.h>
|