mirror of https://github.com/edk2-porting/linux-next.git
synced 2024-12-26 14:14:01 +08:00
commit d51d1e6450
The trace event trace_mm_vmscan_lru_shrink_inactive() currently has 12
parameters! Seven of them are from the reclaim_stat structure. This
structure is currently local to mm/vmscan.c. By moving it to the global
vmstat.h header, we can also reference it from the vmscan tracepoints.
Moving it also brings down the overhead of passing so many arguments to
the trace event. In the future, we may limit the number of arguments
that a trace event may pass (ideally just 6, but more realistically it
may be 8).

Before this patch, the code to call the trace event is this:

   0f 83 aa fe ff ff       jae    ffffffff811e6261 <shrink_inactive_list+0x1e1>
   48 8b 45 a0             mov    -0x60(%rbp),%rax
   45 8b 64 24 20          mov    0x20(%r12),%r12d
   44 8b 6d d4             mov    -0x2c(%rbp),%r13d
   8b 4d d0                mov    -0x30(%rbp),%ecx
   44 8b 75 cc             mov    -0x34(%rbp),%r14d
   44 8b 7d c8             mov    -0x38(%rbp),%r15d
   48 89 45 90             mov    %rax,-0x70(%rbp)
   8b 83 b8 fe ff ff       mov    -0x148(%rbx),%eax
   8b 55 c0                mov    -0x40(%rbp),%edx
   8b 7d c4                mov    -0x3c(%rbp),%edi
   8b 75 b8                mov    -0x48(%rbp),%esi
   89 45 80                mov    %eax,-0x80(%rbp)
   65 ff 05 e4 f7 e2 7e    incl   %gs:0x7ee2f7e4(%rip)        # 15bd0 <__preempt_count>
   48 8b 05 75 5b 13 01    mov    0x1135b75(%rip),%rax        # ffffffff8231bf68 <__tracepoint_mm_vmscan_lru_shrink_inactive+0x28>
   48 85 c0                test   %rax,%rax
   74 72                   je     ffffffff811e646a <shrink_inactive_list+0x3ea>
   48 89 c3                mov    %rax,%rbx
   4c 8b 10                mov    (%rax),%r10
   89 f8                   mov    %edi,%eax
   48 89 85 68 ff ff ff    mov    %rax,-0x98(%rbp)
   89 f0                   mov    %esi,%eax
   48 89 85 60 ff ff ff    mov    %rax,-0xa0(%rbp)
   89 c8                   mov    %ecx,%eax
   48 89 85 78 ff ff ff    mov    %rax,-0x88(%rbp)
   89 d0                   mov    %edx,%eax
   48 89 85 70 ff ff ff    mov    %rax,-0x90(%rbp)
   8b 45 8c                mov    -0x74(%rbp),%eax
   48 8b 7b 08             mov    0x8(%rbx),%rdi
   48 83 c3 18             add    $0x18,%rbx
   50                      push   %rax
   41 54                   push   %r12
   41 55                   push   %r13
   ff b5 78 ff ff ff       pushq  -0x88(%rbp)
   41 56                   push   %r14
   41 57                   push   %r15
   ff b5 70 ff ff ff       pushq  -0x90(%rbp)
   4c 8b 8d 68 ff ff ff    mov    -0x98(%rbp),%r9
   4c 8b 85 60 ff ff ff    mov    -0xa0(%rbp),%r8
   48 8b 4d 98             mov    -0x68(%rbp),%rcx
   48 8b 55 90             mov    -0x70(%rbp),%rdx
   8b 75 80                mov    -0x80(%rbp),%esi
   41 ff d2                callq  *%r10

After the patch:

   0f 83 a8 fe ff ff       jae    ffffffff811e626d <shrink_inactive_list+0x1cd>
   8b 9b b8 fe ff ff       mov    -0x148(%rbx),%ebx
   45 8b 64 24 20          mov    0x20(%r12),%r12d
   4c 8b 6d a0             mov    -0x60(%rbp),%r13
   65 ff 05 f5 f7 e2 7e    incl   %gs:0x7ee2f7f5(%rip)        # 15bd0 <__preempt_count>
   4c 8b 35 86 5b 13 01    mov    0x1135b86(%rip),%r14        # ffffffff8231bf68 <__tracepoint_mm_vmscan_lru_shrink_inactive+0x28>
   4d 85 f6                test   %r14,%r14
   74 2a                   je     ffffffff811e6411 <shrink_inactive_list+0x371>
   49 8b 06                mov    (%r14),%rax
   8b 4d 8c                mov    -0x74(%rbp),%ecx
   49 8b 7e 08             mov    0x8(%r14),%rdi
   49 83 c6 18             add    $0x18,%r14
   4c 89 ea                mov    %r13,%rdx
   45 89 e1                mov    %r12d,%r9d
   4c 8d 45 b8             lea    -0x48(%rbp),%r8
   89 de                   mov    %ebx,%esi
   51                      push   %rcx
   48 8b 4d 98             mov    -0x68(%rbp),%rcx
   ff d0                   callq  *%rax

Link: http://lkml.kernel.org/r/2559d7cb-ec60-1200-2362-04fa34fd02bb@fb.com
Link: http://lkml.kernel.org/r/20180322121003.4177af15@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reported-by: Alexei Starovoitov <ast@fb.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexei Starovoitov <ast@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
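At the C level, the win is that the call site passes one pointer (the single
"lea -0x48(%rbp),%r8" above) instead of spelling out every reclaim_stat field.
A sketch of the shape of the change in mm/vmscan.c (argument lists abridged;
exact names are per the tracepoint definition, not reproduced here):

	/* before: each reclaim_stat field passed as its own scalar */
	trace_mm_vmscan_lru_shrink_inactive(nid, nr_scanned, nr_reclaimed,
			stat.nr_dirty, stat.nr_writeback, /* ...5 more... */
			sc->priority, file);

	/* after: one pointer to the now-global structure */
	trace_mm_vmscan_lru_shrink_inactive(nid, nr_scanned, nr_reclaimed,
			&stat, sc->priority, file);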
390 lines
11 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/vm_event_item.h>
#include <linux/atomic.h>
#include <linux/static_key.h>

extern int sysctl_stat_interval;

#ifdef CONFIG_NUMA
#define ENABLE_NUMA_STAT   1
#define DISABLE_NUMA_STAT   0
extern int sysctl_vm_numa_stat;
DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
		int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif

struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_activate;
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
};

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Light weight per cpu counter implementation.
 *
 * Counters should only be incremented and no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

/*
 * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
 * local_irq_disable overhead.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
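
/*
 * Usage sketch (illustrative, not part of this header): the plain
 * wrappers use this_cpu ops and are safe from any context, while the
 * raw (__-prefixed) forms are cheaper and fit callers that already run
 * non-preemptibly or can tolerate a lost increment:
 *
 *	count_vm_event(PGFAULT);                  (page fault path)
 *	__count_vm_events(PGFREE, 1 << order);    (page freeing path)
 */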

extern void all_vm_events(unsigned long *);

extern void vm_events_fold_cpu(int cpu);

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#ifdef CONFIG_NUMA_BALANCING
#define count_vm_numa_event(x)     count_vm_event(x)
#define count_vm_numa_events(x, y) count_vm_events(x, y)
#else
#define count_vm_numa_event(x) do {} while (0)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_DEBUG_TLBFLUSH
#define count_vm_tlb_event(x)     count_vm_event(x)
#define count_vm_tlb_events(x, y) count_vm_events(x, y)
#else
#define count_vm_tlb_event(x) do {} while (0)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif

#ifdef CONFIG_DEBUG_VM_VMACACHE
#define count_vm_vmacache_event(x) count_vm_event(x)
#else
#define count_vm_vmacache_event(x) do {} while (0)
#endif

#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
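
/*
 * Illustrative expansion (not part of this header): per-zone event items
 * are laid out contiguously, so the zone id selects the item relative to
 * its ZONE_NORMAL flavour. For example,
 *
 *	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 *
 * token-pastes to
 *
 *	__count_vm_events(PGALLOC_NORMAL - ZONE_NORMAL + zid, 1 << order);
 */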

/*
 * Zone and node-based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];

#ifdef CONFIG_NUMA
static inline void zone_numa_state_add(long x, struct zone *zone,
				enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}

static inline unsigned long global_numa_state(enum numa_stat_item item)
{
	long x = atomic_long_read(&vm_numa_stat[item]);

	return x;
}

static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
#endif /* CONFIG_NUMA */

static inline void zone_page_state_add(long x, struct zone *zone,
				enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}

static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}

static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	long x = atomic_long_read(&vm_node_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
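
/*
 * Usage sketch (illustrative, not part of this header): a cheap but
 * possibly stale read versus the snapshot that folds in every online
 * CPU's pending differential:
 *
 *	unsigned long fast = zone_page_state(zone, NR_FREE_PAGES);
 *	unsigned long safe = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 *
 * The snapshot costs a walk over all online CPUs per call, so it is
 * reserved for slow paths that must not act on a stale value.
 */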

#ifdef CONFIG_NUMA
extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
extern unsigned long sum_zone_node_page_state(int node,
					      enum zone_stat_item item);
extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
extern unsigned long node_page_state(struct pglist_data *pgdat,
						enum node_stat_item item);
#else
#define sum_zone_node_page_state(node, item) global_zone_page_state(item)
#define node_page_state(node, item) global_node_page_state(item)
#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
#define add_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, __d)
#define sub_node_page_state(__p, __i, __d) mod_node_page_state(__p, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
void __inc_node_page_state(struct page *, enum node_stat_item);
void __dec_node_page_state(struct page *, enum node_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
void inc_node_page_state(struct page *, enum node_stat_item);
void dec_node_page_state(struct page *, enum node_stat_item);

extern void inc_node_state(struct pglist_data *, enum node_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_node_state(struct pglist_data *, enum node_stat_item);

void quiet_vmstat(void);
void cpu_vm_stats_fold(int cpu);
void refresh_zone_stat_thresholds(void);

struct ctl_table;
int vmstat_refresh(struct ctl_table *, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos);

void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __mod_node_page_state(struct pglist_data *pgdat,
			enum node_stat_item item, int delta)
{
	node_page_state_add(delta, pgdat, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}

static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}

static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define inc_node_page_state __inc_node_page_state
#define dec_node_page_state __dec_node_page_state
#define mod_node_page_state __mod_node_page_state

#define inc_zone_state __inc_zone_state
#define inc_node_state __inc_node_state
#define dec_zone_state __dec_zone_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }

static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
#endif /* CONFIG_SMP */

static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
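
/*
 * Usage sketch (illustrative, not part of this header): the buddy
 * allocator moves the free counters a whole order at a time, positive
 * on free and negative on allocation, with CMA pageblocks tracked as a
 * side effect:
 *
 *	__mod_zone_freepage_state(zone, 1 << order, migratetype);
 *	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 */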

extern const char * const vmstat_text[];

#endif /* _LINUX_VMSTAT_H */