mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-27 06:04:23 +08:00
617a814f14
this pull request are: "Align kvrealloc() with krealloc()" from Danilo Krummrich. Adds consistency to the APIs and behaviour of these two core allocation functions. This also simplifies/enables Rustification. "Some cleanups for shmem" from Baolin Wang. No functional changes - mode code reuse, better function naming, logic simplifications. "mm: some small page fault cleanups" from Josef Bacik. No functional changes - code cleanups only. "Various memory tiering fixes" from Zi Yan. A small fix and a little cleanup. "mm/swap: remove boilerplate" from Yu Zhao. Code cleanups and simplifications and .text shrinkage. "Kernel stack usage histogram" from Pasha Tatashin and Shakeel Butt. This is a feature, it adds new feilds to /proc/vmstat such as $ grep kstack /proc/vmstat kstack_1k 3 kstack_2k 188 kstack_4k 11391 kstack_8k 243 kstack_16k 0 which tells us that 11391 processes used 4k of stack while none at all used 16k. Useful for some system tuning things, but partivularly useful for "the dynamic kernel stack project". "kmemleak: support for percpu memory leak detect" from Pavel Tikhomirov. Teaches kmemleak to detect leaksage of percpu memory. "mm: memcg: page counters optimizations" from Roman Gushchin. "3 independent small optimizations of page counters". "mm: split PTE/PMD PT table Kconfig cleanups+clarifications" from David Hildenbrand. Improves PTE/PMD splitlock detection, makes powerpc/8xx work correctly by design rather than by accident. "mm: remove arch_make_page_accessible()" from David Hildenbrand. Some folio conversions which make arch_make_page_accessible() unneeded. "mm, memcg: cg2 memory{.swap,}.peak write handlers" fro David Finkel. Cleans up and fixes our handling of the resetting of the cgroup/process peak-memory-use detector. "Make core VMA operations internal and testable" from Lorenzo Stoakes. Rationalizaion and encapsulation of the VMA manipulation APIs. With a view to better enable testing of the VMA functions, even from a userspace-only harness. 
"mm: zswap: fixes for global shrinker" from Takero Funaki. Fix issues in the zswap global shrinker, resulting in improved performance. "mm: print the promo watermark in zoneinfo" from Kaiyang Zhao. Fill in some missing info in /proc/zoneinfo. "mm: replace follow_page() by folio_walk" from David Hildenbrand. Code cleanups and rationalizations (conversion to folio_walk()) resulting in the removal of follow_page(). "improving dynamic zswap shrinker protection scheme" from Nhat Pham. Some tuning to improve zswap's dynamic shrinker. Significant reductions in swapin and improvements in performance are shown. "mm: Fix several issues with unaccepted memory" from Kirill Shutemov. Improvements to the new unaccepted memory feature, "mm/mprotect: Fix dax puds" from Peter Xu. Implements mprotect on DAX PUDs. This was missing, although nobody seems to have notied yet. "Introduce a store type enum for the Maple tree" from Sidhartha Kumar. Cleanups and modest performance improvements for the maple tree library code. "memcg: further decouple v1 code from v2" from Shakeel Butt. Move more cgroup v1 remnants away from the v2 memcg code. "memcg: initiate deprecation of v1 features" from Shakeel Butt. Adds various warnings telling users that memcg v1 features are deprecated. "mm: swap: mTHP swap allocator base on swap cluster order" from Chris Li. Greatly improves the success rate of the mTHP swap allocation. "mm: introduce numa_memblks" from Mike Rapoport. Moves various disparate per-arch implementations of numa_memblk code into generic code. "mm: batch free swaps for zap_pte_range()" from Barry Song. Greatly improves the performance of munmap() of swap-filled ptes. "support large folio swap-out and swap-in for shmem" from Baolin Wang. With this series we no longer split shmem large folios into simgle-page folios when swapping out shmem. "mm/hugetlb: alloc/free gigantic folios" from Yu Zhao. Nice performance improvements and code reductions for gigantic folios. 
"support shmem mTHP collapse" from Baolin Wang. Adds support for khugepaged's collapsing of shmem mTHP folios. "mm: Optimize mseal checks" from Pedro Falcato. Fixes an mprotect() performance regression due to the addition of mseal(). "Increase the number of bits available in page_type" from Matthew Wilcox. Increases the number of bits available in page_type! "Simplify the page flags a little" from Matthew Wilcox. Many legacy page flags are now folio flags, so the page-based flags and their accessors/mutators can be removed. "mm: store zero pages to be swapped out in a bitmap" from Usama Arif. An optimization which permits us to avoid writing/reading zero-filled zswap pages to backing store. "Avoid MAP_FIXED gap exposure" from Liam Howlett. Fixes a race window which occurs when a MAP_FIXED operqtion is occurring during an unrelated vma tree walk. "mm: remove vma_merge()" from Lorenzo Stoakes. Major rotorooting of the vma_merge() functionality, making ot cleaner, more testable and better tested. "misc fixups for DAMON {self,kunit} tests" from SeongJae Park. Minor fixups of DAMON selftests and kunit tests. "mm: memory_hotplug: improve do_migrate_range()" from Kefeng Wang. Code cleanups and folio conversions. "Shmem mTHP controls and stats improvements" from Ryan Roberts. Cleanups for shmem controls and stats. "mm: count the number of anonymous THPs per size" from Barry Song. Expose additional anon THP stats to userspace for improved tuning. "mm: finish isolate/putback_lru_page()" from Kefeng Wang: more folio conversions and removal of now-unused page-based APIs. "replace per-quota region priorities histogram buffer with per-context one" from SeongJae Park. DAMON histogram rationalization. "Docs/damon: update GitHub repo URLs and maintainer-profile" from SeongJae Park. DAMON documentation updates. 
"mm/vdpa: correct misuse of non-direct-reclaim __GFP_NOFAIL and improve related doc and warn" from Jason Wang: fixes usage of page allocator __GFP_NOFAIL and GFP_ATOMIC flags. "mm: split underused THPs" from Yu Zhao. Improve THP=always policy - this was overprovisioning THPs in sparsely accessed memory areas. "zram: introduce custom comp backends API" frm Sergey Senozhatsky. Add support for zram run-time compression algorithm tuning. "mm: Care about shadow stack guard gap when getting an unmapped area" from Mark Brown. Fix up the various arch_get_unmapped_area() implementations to better respect guard areas. "Improve mem_cgroup_iter()" from Kinsey Ho. Improve the reliability of mem_cgroup_iter() and various code cleanups. "mm: Support huge pfnmaps" from Peter Xu. Extends the usage of huge pfnmap support. "resource: Fix region_intersects() vs add_memory_driver_managed()" from Huang Ying. Fix a bug in region_intersects() for systems with CXL memory. "mm: hwpoison: two more poison recovery" from Kefeng Wang. Teaches a couple more code paths to correctly recover from the encountering of poisoned memry. "mm: enable large folios swap-in support" from Barry Song. Support the swapin of mTHP memory into appropriately-sized folios, rather than into single-page folios. -----BEGIN PGP SIGNATURE----- iHUEABYIAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCZu1BBwAKCRDdBJ7gKXxA jlWNAQDYlqQLun7bgsAN4sSvi27VUuWv1q70jlMXTfmjJAvQqwD/fBFVR6IOOiw7 AkDbKWP2k0hWPiNJBGwoqxdHHx09Xgo= =s0T+ -----END PGP SIGNATURE----- Merge tag 'mm-stable-2024-09-20-02-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm Pull MM updates from Andrew Morton: "Along with the usual shower of singleton patches, notable patch series in this pull request are: - "Align kvrealloc() with krealloc()" from Danilo Krummrich. Adds consistency to the APIs and behaviour of these two core allocation functions. This also simplifies/enables Rustification. - "Some cleanups for shmem" from Baolin Wang. 
No functional changes - mode code reuse, better function naming, logic simplifications. - "mm: some small page fault cleanups" from Josef Bacik. No functional changes - code cleanups only. - "Various memory tiering fixes" from Zi Yan. A small fix and a little cleanup. - "mm/swap: remove boilerplate" from Yu Zhao. Code cleanups and simplifications and .text shrinkage. - "Kernel stack usage histogram" from Pasha Tatashin and Shakeel Butt. This is a feature, it adds new feilds to /proc/vmstat such as $ grep kstack /proc/vmstat kstack_1k 3 kstack_2k 188 kstack_4k 11391 kstack_8k 243 kstack_16k 0 which tells us that 11391 processes used 4k of stack while none at all used 16k. Useful for some system tuning things, but partivularly useful for "the dynamic kernel stack project". - "kmemleak: support for percpu memory leak detect" from Pavel Tikhomirov. Teaches kmemleak to detect leaksage of percpu memory. - "mm: memcg: page counters optimizations" from Roman Gushchin. "3 independent small optimizations of page counters". - "mm: split PTE/PMD PT table Kconfig cleanups+clarifications" from David Hildenbrand. Improves PTE/PMD splitlock detection, makes powerpc/8xx work correctly by design rather than by accident. - "mm: remove arch_make_page_accessible()" from David Hildenbrand. Some folio conversions which make arch_make_page_accessible() unneeded. - "mm, memcg: cg2 memory{.swap,}.peak write handlers" fro David Finkel. Cleans up and fixes our handling of the resetting of the cgroup/process peak-memory-use detector. - "Make core VMA operations internal and testable" from Lorenzo Stoakes. Rationalizaion and encapsulation of the VMA manipulation APIs. With a view to better enable testing of the VMA functions, even from a userspace-only harness. - "mm: zswap: fixes for global shrinker" from Takero Funaki. Fix issues in the zswap global shrinker, resulting in improved performance. - "mm: print the promo watermark in zoneinfo" from Kaiyang Zhao. 
Fill in some missing info in /proc/zoneinfo. - "mm: replace follow_page() by folio_walk" from David Hildenbrand. Code cleanups and rationalizations (conversion to folio_walk()) resulting in the removal of follow_page(). - "improving dynamic zswap shrinker protection scheme" from Nhat Pham. Some tuning to improve zswap's dynamic shrinker. Significant reductions in swapin and improvements in performance are shown. - "mm: Fix several issues with unaccepted memory" from Kirill Shutemov. Improvements to the new unaccepted memory feature, - "mm/mprotect: Fix dax puds" from Peter Xu. Implements mprotect on DAX PUDs. This was missing, although nobody seems to have notied yet. - "Introduce a store type enum for the Maple tree" from Sidhartha Kumar. Cleanups and modest performance improvements for the maple tree library code. - "memcg: further decouple v1 code from v2" from Shakeel Butt. Move more cgroup v1 remnants away from the v2 memcg code. - "memcg: initiate deprecation of v1 features" from Shakeel Butt. Adds various warnings telling users that memcg v1 features are deprecated. - "mm: swap: mTHP swap allocator base on swap cluster order" from Chris Li. Greatly improves the success rate of the mTHP swap allocation. - "mm: introduce numa_memblks" from Mike Rapoport. Moves various disparate per-arch implementations of numa_memblk code into generic code. - "mm: batch free swaps for zap_pte_range()" from Barry Song. Greatly improves the performance of munmap() of swap-filled ptes. - "support large folio swap-out and swap-in for shmem" from Baolin Wang. With this series we no longer split shmem large folios into simgle-page folios when swapping out shmem. - "mm/hugetlb: alloc/free gigantic folios" from Yu Zhao. Nice performance improvements and code reductions for gigantic folios. - "support shmem mTHP collapse" from Baolin Wang. Adds support for khugepaged's collapsing of shmem mTHP folios. - "mm: Optimize mseal checks" from Pedro Falcato. 
Fixes an mprotect() performance regression due to the addition of mseal(). - "Increase the number of bits available in page_type" from Matthew Wilcox. Increases the number of bits available in page_type! - "Simplify the page flags a little" from Matthew Wilcox. Many legacy page flags are now folio flags, so the page-based flags and their accessors/mutators can be removed. - "mm: store zero pages to be swapped out in a bitmap" from Usama Arif. An optimization which permits us to avoid writing/reading zero-filled zswap pages to backing store. - "Avoid MAP_FIXED gap exposure" from Liam Howlett. Fixes a race window which occurs when a MAP_FIXED operqtion is occurring during an unrelated vma tree walk. - "mm: remove vma_merge()" from Lorenzo Stoakes. Major rotorooting of the vma_merge() functionality, making ot cleaner, more testable and better tested. - "misc fixups for DAMON {self,kunit} tests" from SeongJae Park. Minor fixups of DAMON selftests and kunit tests. - "mm: memory_hotplug: improve do_migrate_range()" from Kefeng Wang. Code cleanups and folio conversions. - "Shmem mTHP controls and stats improvements" from Ryan Roberts. Cleanups for shmem controls and stats. - "mm: count the number of anonymous THPs per size" from Barry Song. Expose additional anon THP stats to userspace for improved tuning. - "mm: finish isolate/putback_lru_page()" from Kefeng Wang: more folio conversions and removal of now-unused page-based APIs. - "replace per-quota region priorities histogram buffer with per-context one" from SeongJae Park. DAMON histogram rationalization. - "Docs/damon: update GitHub repo URLs and maintainer-profile" from SeongJae Park. DAMON documentation updates. - "mm/vdpa: correct misuse of non-direct-reclaim __GFP_NOFAIL and improve related doc and warn" from Jason Wang: fixes usage of page allocator __GFP_NOFAIL and GFP_ATOMIC flags. - "mm: split underused THPs" from Yu Zhao. Improve THP=always policy. 
This was overprovisioning THPs in sparsely accessed memory areas. - "zram: introduce custom comp backends API" frm Sergey Senozhatsky. Add support for zram run-time compression algorithm tuning. - "mm: Care about shadow stack guard gap when getting an unmapped area" from Mark Brown. Fix up the various arch_get_unmapped_area() implementations to better respect guard areas. - "Improve mem_cgroup_iter()" from Kinsey Ho. Improve the reliability of mem_cgroup_iter() and various code cleanups. - "mm: Support huge pfnmaps" from Peter Xu. Extends the usage of huge pfnmap support. - "resource: Fix region_intersects() vs add_memory_driver_managed()" from Huang Ying. Fix a bug in region_intersects() for systems with CXL memory. - "mm: hwpoison: two more poison recovery" from Kefeng Wang. Teaches a couple more code paths to correctly recover from the encountering of poisoned memry. - "mm: enable large folios swap-in support" from Barry Song. Support the swapin of mTHP memory into appropriately-sized folios, rather than into single-page folios" * tag 'mm-stable-2024-09-20-02-31' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (416 commits) zram: free secondary algorithms names uprobes: turn xol_area->pages[2] into xol_area->page uprobes: introduce the global struct vm_special_mapping xol_mapping Revert "uprobes: use vm_special_mapping close() functionality" mm: support large folios swap-in for sync io devices mm: add nr argument in mem_cgroup_swapin_uncharge_swap() helper to support large folios mm: fix swap_read_folio_zeromap() for large folios with partial zeromap mm/debug_vm_pgtable: Use pxdp_get() for accessing page table entries set_memory: add __must_check to generic stubs mm/vma: return the exact errno in vms_gather_munmap_vmas() memcg: cleanup with !CONFIG_MEMCG_V1 mm/show_mem.c: report alloc tags in human readable units mm: support poison recovery from copy_present_page() mm: support poison recovery from do_cow_fault() resource, kunit: add test case for 
region_intersects() resource: make alloc_free_mem_region() works for iomem_resource mm: z3fold: deprecate CONFIG_Z3FOLD vfio/pci: implement huge_fault support mm/arm64: support large pfn mappings mm/x86: support large pfn mappings ...
364 lines
10 KiB
C
364 lines
10 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/* Internal procfs definitions
|
|
*
|
|
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
|
|
* Written by David Howells (dhowells@redhat.com)
|
|
*/
|
|
|
|
#include <linux/proc_fs.h>
|
|
#include <linux/proc_ns.h>
|
|
#include <linux/refcount.h>
|
|
#include <linux/spinlock.h>
|
|
#include <linux/atomic.h>
|
|
#include <linux/binfmts.h>
|
|
#include <linux/sched/coredump.h>
|
|
#include <linux/sched/task.h>
|
|
#include <linux/mm.h>
|
|
|
|
/* Opaque types referenced by pointer below; full definitions live elsewhere. */
struct ctl_table_header;
struct mempolicy;
|
|
|
|
/*
 * This is not completely implemented yet. The idea is to
 * create an in-memory tree (like the actual /proc filesystem
 * tree) of these proc_dir_entries, so that we can dynamically
 * add new files to /proc.
 *
 * parent/subdir are used for the directory structure (every /proc file has a
 * parent, but "subdir" is empty for all non-directory entries).
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
	/*
	 * number of callers into module in progress;
	 * negative -> it's going away RSN
	 */
	atomic_t in_use;
	/* lifetime reference count; dropped via pde_put() */
	refcount_t refcnt;
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	/* protects ->pde_openers and all struct pde_opener instances */
	spinlock_t pde_unload_lock;
	struct completion *pde_unload_completion;
	const struct inode_operations *proc_iops;
	/*
	 * NOTE(review): by naming, ->proc_ops is for regular files and
	 * ->proc_dir_ops for directories — confirm against generic.c users.
	 */
	union {
		const struct proc_ops *proc_ops;
		const struct file_operations *proc_dir_ops;
	};
	const struct dentry_operations *proc_dops;
	/* seq_file plumbing: either full seq_operations or a single-show callback */
	union {
		const struct seq_operations *seq_ops;
		int (*single_show)(struct seq_file *, void *);
	};
	proc_write_t write;
	void *data;		/* private data handed to the ops callbacks */
	unsigned int state_size;
	unsigned int low_ino;	/* inode number within procfs */
	nlink_t nlink;
	kuid_t uid;
	kgid_t gid;
	loff_t size;
	struct proc_dir_entry *parent;
	struct rb_root subdir;		/* children, when this entry is a directory */
	struct rb_node subdir_node;	/* our link in parent->subdir */
	char *name;		/* points at inline_name when the name fits there */
	umode_t mode;
	u8 flags;		/* PROC_ENTRY_* bits */
	u8 namelen;
	/* short names are stored inline; sized via SIZEOF_PDE_INLINE_NAME */
	char inline_name[];
} __randomize_layout;
|
|
|
|
/*
 * Round the proc_dir_entry allocation up to the next kmalloc-style bucket
 * size (128/192/256/512) so the trailing inline_name[] flexible array can
 * use the slack bytes that would otherwise be wasted.  Evaluates to 0 if
 * the struct ever outgrows 512 bytes — NOTE(review): presumably that is
 * caught when the slab cache is created; confirm at the cache setup site.
 */
#define SIZEOF_PDE	(				\
	sizeof(struct proc_dir_entry) < 128 ? 128 :	\
	sizeof(struct proc_dir_entry) < 192 ? 192 :	\
	sizeof(struct proc_dir_entry) < 256 ? 256 :	\
	sizeof(struct proc_dir_entry) < 512 ? 512 :	\
	0)
/* Bytes left over in the bucket for the inline_name[] flexible array. */
#define SIZEOF_PDE_INLINE_NAME	(SIZEOF_PDE - sizeof(struct proc_dir_entry))
|
|
|
|
static inline bool pde_is_permanent(const struct proc_dir_entry *pde)
|
|
{
|
|
return pde->flags & PROC_ENTRY_PERMANENT;
|
|
}
|
|
|
|
static inline void pde_make_permanent(struct proc_dir_entry *pde)
|
|
{
|
|
pde->flags |= PROC_ENTRY_PERMANENT;
|
|
}
|
|
|
|
/* Slab cache from which every proc_dir_entry is allocated. */
extern struct kmem_cache *proc_dir_entry_cache;
/* Return @pde to proc_dir_entry_cache. */
void pde_free(struct proc_dir_entry *pde);
|
|
|
|
/*
 * Per-inode operation payload for pid-based /proc files: either a symlink
 * resolver, or a show callback for a seq_file-backed entry.
 */
union proc_op {
	int (*proc_get_link)(struct dentry *, struct path *);
	int (*proc_show)(struct seq_file *m,
			 struct pid_namespace *ns, struct pid *pid,
			 struct task_struct *task);
	/* LSM id, used by attr entries */
	int lsmid;
};
|
|
|
|
/*
 * procfs-private inode: wraps the VFS inode (vfs_inode must stay last-ish
 * embedded member so PROC_I() can container_of() back to it).
 */
struct proc_inode {
	struct pid *pid;		/* task this inode describes, if any */
	unsigned int fd;		/* fd number for /proc/<pid>/fd entries */
	union proc_op op;
	struct proc_dir_entry *pde;	/* backing entry for non-pid files */
	struct ctl_table_header *sysctl;	/* for sysctl-backed inodes */
	struct ctl_table *sysctl_entry;
	struct hlist_node sibling_inodes;
	const struct proc_ns_operations *ns_ops;
	struct inode vfs_inode;		/* embedded VFS inode */
} __randomize_layout;
|
|
|
|
/*
 * General functions
 */
/* Map a VFS inode back to its enclosing proc_inode. */
static inline struct proc_inode *PROC_I(const struct inode *inode)
{
	return container_of(inode, struct proc_inode, vfs_inode);
}
|
|
|
|
static inline struct proc_dir_entry *PDE(const struct inode *inode)
|
|
{
|
|
return PROC_I(inode)->pde;
|
|
}
|
|
|
|
static inline struct pid *proc_pid(const struct inode *inode)
|
|
{
|
|
return PROC_I(inode)->pid;
|
|
}
|
|
|
|
static inline struct task_struct *get_proc_task(const struct inode *inode)
|
|
{
|
|
return get_pid_task(proc_pid(inode), PIDTYPE_PID);
|
|
}
|
|
|
|
/* Compute the uid/gid that /proc files for @task should report as owner. */
void task_dump_owner(struct task_struct *task, umode_t mode,
		     kuid_t *ruid, kgid_t *rgid);

/*
 * Parse an all-digit qstr (e.g. a PID directory name) into an unsigned.
 * NOTE(review): presumably returns ~0U on non-numeric input — confirm in
 * generic.c before relying on the sentinel.
 */
unsigned name_to_int(const struct qstr *qstr);
/*
 * Offset of the first process in the /proc root directory..
 */
#define FIRST_PROCESS_ENTRY 256

/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13
|
|
|
|
/**
|
|
* folio_precise_page_mapcount() - Number of mappings of this folio page.
|
|
* @folio: The folio.
|
|
* @page: The page.
|
|
*
|
|
* The number of present user page table entries that reference this page
|
|
* as tracked via the RMAP: either referenced directly (PTE) or as part of
|
|
* a larger area that covers this page (e.g., PMD).
|
|
*
|
|
* Use this function only for the calculation of existing statistics
|
|
* (USS, PSS, mapcount_max) and for debugging purposes (/proc/kpagecount).
|
|
*
|
|
* Do not add new users.
|
|
*
|
|
* Returns: The number of mappings of this folio page. 0 for
|
|
* folios that are not mapped to user space or are not tracked via the RMAP
|
|
* (e.g., shared zeropage).
|
|
*/
|
|
static inline int folio_precise_page_mapcount(struct folio *folio,
					      struct page *page)
{
	/* _mapcount is stored off-by-one; +1 yields the per-page PTE count. */
	int mapcount = atomic_read(&page->_mapcount) + 1;

	/* Type-encoded mapcounts mean "not RMAP-mapped"; report 0 instead. */
	if (page_mapcount_is_type(mapcount))
		mapcount = 0;
	/* Large folios may additionally be mapped as a whole (e.g. via PMD). */
	if (folio_test_large(folio))
		mapcount += folio_entire_mapcount(folio);

	return mapcount;
}
|
|
|
|
/*
 * array.c
 */
extern const struct file_operations proc_tid_children_operations;

/* Emit the task's comm into @m, optionally escaping special characters. */
extern void proc_task_name(struct seq_file *m, struct task_struct *p,
			   bool escape);
/* show callbacks for /proc/<pid>/stat, status and statm (tid/tgid variants) */
extern int proc_tid_stat(struct seq_file *, struct pid_namespace *,
			 struct pid *, struct task_struct *);
extern int proc_tgid_stat(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);
extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
			   struct pid *, struct task_struct *);
extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
			  struct pid *, struct task_struct *);
|
|
|
|
/*
 * base.c
 */
extern const struct dentry_operations pid_dentry_operations;
extern int pid_getattr(struct mnt_idmap *, const struct path *,
		       struct kstat *, u32, unsigned int);
extern int proc_setattr(struct mnt_idmap *, struct dentry *,
			struct iattr *);
extern void proc_pid_evict_inode(struct proc_inode *);
extern struct inode *proc_pid_make_inode(struct super_block *, struct task_struct *, umode_t);
/* refresh an existing pid inode's ownership from the task */
extern void pid_update_inode(struct task_struct *, struct inode *);
extern int pid_delete_dentry(const struct dentry *);
extern int proc_pid_readdir(struct file *, struct dir_context *);
struct dentry *proc_pid_lookup(struct dentry *, unsigned int);
extern loff_t mem_lseek(struct file *, loff_t, int);

/* Lookups */
/* callback that materialises a dentry for one directory entry */
typedef struct dentry *instantiate_t(struct dentry *,
				     struct task_struct *, const void *);
bool proc_fill_cache(struct file *, struct dir_context *, const char *, unsigned int,
		     instantiate_t, struct task_struct *, const void *);
|
|
|
|
/*
 * generic.c
 */
/* allocate a regular-file entry under *parent (not yet registered) */
struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode,
				       struct proc_dir_entry **parent, void *data);
/* link @dp into @dir's subdir tree, making it visible */
struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
				     struct proc_dir_entry *dp);
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);
|
|
|
|
/* Take a reference on @pde; pair with pde_put(). */
static inline void pde_get(struct proc_dir_entry *pde)
{
	refcount_inc(&pde->refcnt);
}
/* Drop a pde_get() reference; presumably frees via pde_free() at zero. */
extern void pde_put(struct proc_dir_entry *);
|
|
|
|
static inline bool is_empty_pde(const struct proc_dir_entry *pde)
|
|
{
|
|
return S_ISDIR(pde->mode) && !pde->proc_iops;
|
|
}
|
|
extern ssize_t proc_simple_write(struct file *, const char __user *, size_t, loff_t *);
|
|
|
|
/*
 * inode.c
 */
/*
 * One record per outstanding ->open of a PDE-backed file; linked on the
 * entry's pde_openers list under pde_unload_lock.
 */
struct pde_opener {
	struct list_head lh;	/* link in pde->pde_openers */
	struct file *file;
	bool closing;		/* release in progress */
	struct completion *c;	/* signalled when the release completes */
} __randomize_layout;
|
|
extern const struct inode_operations proc_link_inode_operations;
extern const struct inode_operations proc_pid_link_inode_operations;
extern const struct super_operations proc_sops;

/* one-time setup of proc_inode_cachep and proc_dir_entry_cache */
void proc_init_kmemcache(void);
void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock);
void set_proc_pid_nlink(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
/* wait out / unblock all openers of a PDE being removed */
extern void proc_entry_rundown(struct proc_dir_entry *);
|
|
|
|
/*
 * proc_namespaces.c
 */
extern const struct inode_operations proc_ns_dir_inode_operations;
extern const struct file_operations proc_ns_dir_operations;

/*
 * proc_net.c
 */
extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;
|
|
|
|
#ifdef CONFIG_NET
extern int proc_net_init(void);
#else
/* No networking, no /proc/net: succeed trivially. */
static inline int proc_net_init(void) { return 0; }
#endif
|
|
|
|
/*
 * proc_self.c
 */
/* create the /proc/self symlink on mount */
extern int proc_setup_self(struct super_block *);

/*
 * proc_thread_self.c
 */
/* create the /proc/thread-self symlink on mount */
extern int proc_setup_thread_self(struct super_block *);
extern void proc_thread_self_init(void);
|
|
|
|
/*
 * proc_sysctl.c
 */
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
extern void proc_sys_evict_inode(struct inode *inode,
				 struct ctl_table_header *head);
#else
/*
 * Stubs for !CONFIG_PROC_SYSCTL.  proc_sys_init() returns int (always
 * success) so the stub's type matches the real declaration above and the
 * proc_net_init() stub pattern used for CONFIG_NET; previously it was
 * declared void, giving the two config branches incompatible signatures.
 */
static inline int proc_sys_init(void) { return 0; }
static inline void proc_sys_evict_inode(struct inode *inode,
					struct ctl_table_header *head) { }
#endif
|
|
|
|
/*
 * proc_tty.c
 */
#ifdef CONFIG_TTY
extern void proc_tty_init(void);
#else
/* No TTY layer: /proc/tty setup is a no-op. */
static inline void proc_tty_init(void) {}
#endif
|
|
|
|
/*
 * root.c
 */
/* the statically allocated root entry of the /proc tree */
extern struct proc_dir_entry proc_root;

extern void proc_self_init(void);
|
|
|
|
/*
 * task_[no]mmu.c
 */
struct mem_size_stats;
/*
 * Per-open state for the /proc/<pid>/{maps,smaps,numa_maps} seq_files.
 */
struct proc_maps_private {
	struct inode *inode;
	struct task_struct *task;
	struct mm_struct *mm;		/* target mm, pinned for the walk */
	struct vma_iterator iter;	/* resumable VMA walk position */
#ifdef CONFIG_NUMA
	struct mempolicy *task_mempolicy;
#endif
} __randomize_layout;
|
|
|
|
/*
 * Acquire the target task's mm for a /proc/<pid> memory file.
 * NOTE(review): @mode is presumably a PTRACE_MODE_* access mode — confirm
 * against proc_mem_open()'s definition before relying on that.
 */
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode);

/* file_operations for the per-task memory-map files */
extern const struct file_operations proc_pid_maps_operations;
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;

extern unsigned long task_vsize(struct mm_struct *);
extern unsigned long task_statm(struct mm_struct *,
			       unsigned long *, unsigned long *,
			       unsigned long *, unsigned long *);
extern void task_mem(struct seq_file *, struct mm_struct *);

extern const struct dentry_operations proc_net_dentry_ops;
|
|
/* Force dentry revalidation on every lookup of @pde's entries. */
static inline void pde_force_lookup(struct proc_dir_entry *pde)
{
	/* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
	pde->proc_dops = &proc_net_dentry_ops;
}
|
|
|
|
/*
 * Add a new procfs dentry that can't serve as a mountpoint. That should
 * encompass anything that is ephemeral and can just disappear while the
 * process is still around.
 */
static inline struct dentry *proc_splice_unmountable(struct inode *inode,
		struct dentry *dentry, const struct dentry_operations *d_ops)
{
	/* install ops before the dentry becomes reachable via the alias */
	d_set_d_op(dentry, d_ops);
	/* flag the dentry so the VFS refuses to mount on it */
	dont_mount(dentry);
	return d_splice_alias(inode, dentry);
}
|