/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal procfs definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/proc_fs.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/binfmts.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

struct ctl_table_header;
struct mempolicy;

/*
 * This is not completely implemented yet. The idea is to
 * create an in-memory tree (like the actual /proc filesystem
 * tree) of these proc_dir_entries, so that we can dynamically
 * add new files to /proc.
 *
 * parent/subdir are used for the directory structure (every /proc file has a
 * parent, but "subdir" is empty for all non-directory entries).
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
	/*
	 * number of callers into module in progress;
	 * negative -> it's going away RSN
	 */
	atomic_t in_use;
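	/* reference count on this entry; see pde_get()/pde_put() */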
	refcount_t refcnt;
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	/* protects ->pde_openers and all struct pde_opener instances */
	spinlock_t pde_unload_lock;
	struct completion *pde_unload_completion;
	const struct inode_operations *proc_iops;
	union {
		const struct proc_ops *proc_ops;
		const struct file_operations *proc_dir_ops;
	};
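	/* optional dentry_operations override; see pde_force_lookup() */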
	const struct dentry_operations *proc_dops;
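	/* either a full seq_file iterator or a single ->show() callback */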
	union {
		const struct seq_operations *seq_ops;
		int (*single_show)(struct seq_file *, void *);
	};
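	/* optional write hook used by proc_simple_write() */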
	proc_write_t write;
	void *data;
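	/* extra per-open state allocated for seq_file based entries, if any */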
	unsigned int state_size;
	unsigned int low_ino;
	nlink_t nlink;
	kuid_t uid;
	kgid_t gid;
	loff_t size;
	struct proc_dir_entry *parent;
	struct rb_root subdir;
	struct rb_node subdir_node;
	char *name;
	umode_t mode;
	u8 flags;
	u8 namelen;
	char inline_name[];
} __randomize_layout;
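/*
 * Object size used for proc_dir_entry_cache: round sizeof(struct
 * proc_dir_entry) up to a fixed bucket so that the remaining bytes
 * (SIZEOF_PDE_INLINE_NAME) can hold the name in ->inline_name[].
 */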
#define SIZEOF_PDE	(				\
	sizeof(struct proc_dir_entry) < 128 ? 128 :	\
	sizeof(struct proc_dir_entry) < 192 ? 192 :	\
	sizeof(struct proc_dir_entry) < 256 ? 256 :	\
	sizeof(struct proc_dir_entry) < 512 ? 512 :	\
	0)
#define SIZEOF_PDE_INLINE_NAME (SIZEOF_PDE - sizeof(struct proc_dir_entry))
static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
{
	return pde->flags & PROC_ENTRY_PERMANENT;
}

static inline void pde_make_permanent(struct proc_dir_entry *pde)
{
	pde->flags |= PROC_ENTRY_PERMANENT;
}

extern struct kmem_cache *proc_dir_entry_cache;
void pde_free(struct proc_dir_entry *pde);
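/* operations for /proc/<pid>/ entries; stored in proc_inode.op */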
union proc_op {
	int (*proc_get_link)(struct dentry *, struct path *);
	int (*proc_show)(struct seq_file *m,
		struct pid_namespace *ns, struct pid *pid,
		struct task_struct *task);
	int lsmid;
};

struct proc_inode {
	struct pid *pid;
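	/* target fd for /proc/<pid>/fd/ and fdinfo/ entries */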
	unsigned int fd;
	union proc_op op;
	struct proc_dir_entry *pde;
	struct ctl_table_header *sysctl;
	struct ctl_table *sysctl_entry;
	struct hlist_node sibling_inodes;
	const struct proc_ns_operations *ns_ops;
	struct inode vfs_inode;
} __randomize_layout;

/*
 * General functions
 */
static inline struct proc_inode *PROC_I(const struct inode *inode)
{
	return container_of(inode, struct proc_inode, vfs_inode);
}

static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
	return PROC_I(inode)->pde;
}

static inline struct pid *proc_pid(const struct inode *inode)
{
	return PROC_I(inode)->pid;
}

static inline struct task_struct *get_proc_task(const struct inode *inode)
{
	return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}

void task_dump_owner(struct task_struct *task, umode_t mode,
		     kuid_t *ruid, kgid_t *rgid);

unsigned name_to_int(const struct qstr *qstr);

/*
 * Offset of the first process in the /proc root directory.
 */
#define FIRST_PROCESS_ENTRY 256

/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13
/**
 * folio_precise_page_mapcount() - Number of mappings of this folio page.
 * @folio: The folio.
 * @page: The page.
 *
 * The number of present user page table entries that reference this page
 * as tracked via the RMAP: either referenced directly (PTE) or as part of
 * a larger area that covers this page (e.g., PMD).
 *
 * Use this function only for the calculation of existing statistics
 * (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount).
 *
 * Do not add new users.
 *
 * Returns: The number of mappings of this folio page. 0 for
 * folios that are not mapped to user space or are not tracked via the RMAP
 * (e.g., shared zeropage).
 */
static inline int folio_precise_page_mapcount(struct folio *folio,
		struct page *page)
{
	int mapcount = atomic_read(&page->_mapcount) + 1;

	if (page_mapcount_is_type(mapcount))
		mapcount = 0;
	if (folio_test_large(folio))
		mapcount += folio_entire_mapcount(folio);

	return mapcount;
}

/*
 * array.c
 */
extern const struct file_operations proc_tid_children_operations;

extern void proc_task_name(struct seq_file *m, struct task_struct *p,
			   bool escape);
extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
			 struct pid *, struct task_struct *);
extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);
extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
			   struct pid *, struct task_struct *);
extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);

/*
 * base.c
 */
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(struct mnt_idmap *, const struct path *,
		       struct kstat *, u32, unsigned int);
extern int proc_setattr(struct mnt_idmap *, struct dentry *,
			struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);

/* Lookups */
typedef struct dentry *instantiate_t(struct dentry *,
				     struct task_struct *, const void *);
bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
			   instantiate_t, struct task_struct *, const void *);

/*
 * generic.c
 */
struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
		struct proc_dir_entry **parent, void *data);
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
		struct proc_dir_entry *dp);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);

static inline void pde_get(struct proc_dir_entry *pde)
{
	refcount_inc(&pde->refcnt);
}
extern void pde_put(struct proc_dir_entry *);

static inline bool is_empty_pde(const struct proc_dir_entry *pde)
{
	return S_ISDIR(pde->mode) && !pde->proc_iops;
}
extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *);
/*
 * inode.c
 */
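/* one of these per file that did ->open but not yet ->release on the entry */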
struct pde_opener {
	struct list_head lh;
	struct file *file;
	bool closing;
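	/* if set, completed once ->release() finishes; entry removal waits on it */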
	struct completion *c;
} __randomize_layout;
extern const struct inode_operations proc_link_inode_operations;

extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;

void proc_init_kmemcache(void);
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
extern void proc_entry_rundown(struct proc_dir_entry *);

/*
 * proc_namespaces.c
 */
extern const struct inode_operations proc_ns_dir_inode_operations;
extern const struct file_operations proc_ns_dir_operations;

/*
 * proc_net.c
 */
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;

#ifdef CONFIG_NET
extern int proc_net_init(void);
#else
static inline int proc_net_init(void) { return 0; }
#endif

/*
 * proc_self.c
 */
extern int proc_setup_self(struct super_block *);
/*
 * proc_thread_self.c
 */
extern int proc_setup_thread_self(struct super_block *);
extern void proc_thread_self_init(void);

/*
 * proc_sysctl.c
 */
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void proc_sys_evict_inode(struct inode *inode,
				 struct ctl_table_header *head);
#else
static inline void proc_sys_init(void) { }
static inline void proc_sys_evict_inode(struct inode *inode,
					struct ctl_table_header *head) { }
#endif
/*
 * proc_tty.c
 */
#ifdef CONFIG_TTY
extern void proc_tty_init(void);
#else
static inline void proc_tty_init(void) {}
#endif

/*
 * root.c
 */
extern struct proc_dir_entry proc_root;

extern void proc_self_init(void);

/*
 * task_[no]mmu.c
 */
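/* defined in task_mmu.c; accumulates per-VMA stats for smaps/smaps_rollup */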
struct mem_size_stats;

struct proc_maps_private {
	struct inode *inode;
	struct task_struct *task;
	struct mm_struct *mm;
	struct vma_iterator iter;
#ifdef CONFIG_NUMA
	struct mempolicy *task_mempolicy;
#endif
} __randomize_layout;

struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);
extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;
extern unsigned long task_vsize(struct mm_struct *);
extern unsigned long task_statm(struct mm_struct *,
				unsigned long *, unsigned long *,
				unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);
extern const struct dentry_operations proc_net_dentry_ops;
static inline void pde_force_lookup(struct proc_dir_entry *pde)
{
	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
	pde->proc_dops = &proc_net_dentry_ops;
}
/*
 * Add a new procfs dentry that can't serve as a mountpoint. That should
 * encompass anything that is ephemeral and can just disappear while the
 * process is still around.
 */
static inline struct dentry *proc_splice_unmountable(struct inode *inode,
		struct dentry *dentry, const struct dentry_operations *d_ops)
{
	d_set_d_op(dentry, d_ops);
	dont_mount(dentry);
	return d_splice_alias(inode, dentry);
}