Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "Subsystems affected by this patch series: mm/hotfixes, lz4, exec,
  mailmap, mm/thp, autofs, sysctl, mm/kmemleak, mm/misc and lib"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  virtio: pci: constify ioreadX() iomem argument (as in generic implementation)
  ntb: intel: constify ioreadX() iomem argument (as in generic implementation)
  rtl818x: constify ioreadX() iomem argument (as in generic implementation)
  iomap: constify ioreadX() iomem argument (as in generic implementation)
  sh: use generic strncpy()
  sh: clkfwk: remove r8/r16/r32
  include/asm-generic/vmlinux.lds.h: align ro_after_init
  mm: annotate a data race in page_zonenum()
  mm/swap.c: annotate data races for lru_rotate_pvecs
  mm/rmap: annotate a data race at tlb_flush_batched
  mm/mempool: fix a data race in mempool_free()
  mm/list_lru: fix a data race in list_lru_count_one
  mm/memcontrol: fix a data race in scan count
  mm/page_counter: fix various data races at memsw
  mm/swapfile: fix and annotate various data races
  mm/filemap.c: fix a data race in filemap_fault()
  mm/swap_state: mark various intentional data races
  mm/page_io: mark various intentional data races
  mm/frontswap: mark various intentional data races
  mm/kmemleak: silence KCSAN splats in checksum
  ...
commit 18737f4243

.mailmap
@@ -104,6 +104,7 @@ Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
@@ -384,7 +384,7 @@ struct el_apecs_procdata
    } \
} while (0)

__EXTERN_INLINE unsigned int apecs_ioread8(void __iomem *xaddr)
__EXTERN_INLINE unsigned int apecs_ioread8(const void __iomem *xaddr)
{
    unsigned long addr = (unsigned long) xaddr;
    unsigned long result, base_and_type;
@@ -420,7 +420,7 @@ __EXTERN_INLINE void apecs_iowrite8(u8 b, void __iomem *xaddr)
    *(vuip) ((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE unsigned int apecs_ioread16(void __iomem *xaddr)
__EXTERN_INLINE unsigned int apecs_ioread16(const void __iomem *xaddr)
{
    unsigned long addr = (unsigned long) xaddr;
    unsigned long result, base_and_type;
@@ -456,7 +456,7 @@ __EXTERN_INLINE void apecs_iowrite16(u16 b, void __iomem *xaddr)
    *(vuip) ((addr << 5) + base_and_type) = w;
}

__EXTERN_INLINE unsigned int apecs_ioread32(void __iomem *xaddr)
__EXTERN_INLINE unsigned int apecs_ioread32(const void __iomem *xaddr)
{
    unsigned long addr = (unsigned long) xaddr;
    if (addr < APECS_DENSE_MEM)
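The change repeated across these Alpha chipset headers is a prototype-only change: the read-side accessors (ioread8/16/32 and friends) now take a `const void __iomem *`, while the write-side accessors keep a non-const pointer. The point is const-correctness at call sites that only read registers. Below is a minimal user-space sketch of the same idea; plain `const volatile` pointers stand in for `__iomem` cookies, and the `demo_*` names are illustrative, not the kernel API.

```c
#include <stdint.h>
#include <stdio.h>

/* Read accessors take a pointer-to-const: they promise not to modify
 * the "register" behind it.  Write accessors keep a non-const pointer. */
static unsigned int demo_ioread8(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

static void demo_iowrite8(uint8_t value, volatile void *addr)
{
	*(volatile uint8_t *)addr = value;
}

int main(void)
{
	uint8_t fake_reg = 0;

	demo_iowrite8(0x5a, &fake_reg);

	/* A const-qualified view of the register block can be handed to the
	 * read accessor directly; before the constification this needed a
	 * cast away from const. */
	const volatile uint8_t *ro_view = &fake_reg;
	printf("read back 0x%02x\n", demo_ioread8(ro_view));
	return 0;
}
```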
@ -342,7 +342,7 @@ struct el_CIA_sysdata_mcheck {
|
||||
#define vuip volatile unsigned int __force *
|
||||
#define vulp volatile unsigned long __force *
|
||||
|
||||
__EXTERN_INLINE unsigned int cia_ioread8(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int cia_ioread8(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
unsigned long result, base_and_type;
|
||||
@ -374,7 +374,7 @@ __EXTERN_INLINE void cia_iowrite8(u8 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + base_and_type) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int cia_ioread16(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int cia_ioread16(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
unsigned long result, base_and_type;
|
||||
@ -404,7 +404,7 @@ __EXTERN_INLINE void cia_iowrite16(u16 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + base_and_type) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int cia_ioread32(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int cia_ioread32(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
if (addr < CIA_DENSE_MEM)
|
||||
|
@ -230,7 +230,7 @@ union el_lca {
|
||||
} while (0)
|
||||
|
||||
|
||||
__EXTERN_INLINE unsigned int lca_ioread8(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int lca_ioread8(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
unsigned long result, base_and_type;
|
||||
@ -266,7 +266,7 @@ __EXTERN_INLINE void lca_iowrite8(u8 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + base_and_type) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int lca_ioread16(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int lca_ioread16(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
unsigned long result, base_and_type;
|
||||
@ -302,7 +302,7 @@ __EXTERN_INLINE void lca_iowrite16(u16 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + base_and_type) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int lca_ioread32(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int lca_ioread32(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
if (addr < LCA_DENSE_MEM)
|
||||
|
@ -332,10 +332,10 @@ struct io7 {
|
||||
#define vucp volatile unsigned char __force *
|
||||
#define vusp volatile unsigned short __force *
|
||||
|
||||
extern unsigned int marvel_ioread8(void __iomem *);
|
||||
extern unsigned int marvel_ioread8(const void __iomem *);
|
||||
extern void marvel_iowrite8(u8 b, void __iomem *);
|
||||
|
||||
__EXTERN_INLINE unsigned int marvel_ioread16(void __iomem *addr)
|
||||
__EXTERN_INLINE unsigned int marvel_ioread16(const void __iomem *addr)
|
||||
{
|
||||
return __kernel_ldwu(*(vusp)addr);
|
||||
}
|
||||
|
@ -267,7 +267,7 @@ extern inline int __mcpcia_is_mmio(unsigned long addr)
|
||||
return (addr & 0x80000000UL) == 0;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread8(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread8(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
|
||||
unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
|
||||
@ -291,7 +291,7 @@ __EXTERN_INLINE void mcpcia_iowrite8(u8 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + hose + 0x00) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread16(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread16(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long)xaddr & MCPCIA_MEM_MASK;
|
||||
unsigned long hose = (unsigned long)xaddr & ~MCPCIA_MEM_MASK;
|
||||
@ -315,7 +315,7 @@ __EXTERN_INLINE void mcpcia_iowrite16(u16 b, void __iomem *xaddr)
|
||||
*(vuip) ((addr << 5) + hose + 0x08) = w;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread32(void __iomem *xaddr)
|
||||
__EXTERN_INLINE unsigned int mcpcia_ioread32(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long)xaddr;
|
||||
|
||||
|
@ -572,7 +572,7 @@ __EXTERN_INLINE int t2_is_mmio(const volatile void __iomem *addr)
|
||||
it doesn't make sense to merge the pio and mmio routines. */
|
||||
|
||||
#define IOPORT(OS, NS) \
|
||||
__EXTERN_INLINE unsigned int t2_ioread##NS(void __iomem *xaddr) \
|
||||
__EXTERN_INLINE unsigned int t2_ioread##NS(const void __iomem *xaddr) \
|
||||
{ \
|
||||
if (t2_is_mmio(xaddr)) \
|
||||
return t2_read##OS(xaddr); \
|
||||
|
@ -150,9 +150,9 @@ static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
|
||||
alpha_mv.mv_##NAME(b, addr); \
|
||||
}
|
||||
|
||||
REMAP1(unsigned int, ioread8, /**/)
|
||||
REMAP1(unsigned int, ioread16, /**/)
|
||||
REMAP1(unsigned int, ioread32, /**/)
|
||||
REMAP1(unsigned int, ioread8, const)
|
||||
REMAP1(unsigned int, ioread16, const)
|
||||
REMAP1(unsigned int, ioread32, const)
|
||||
REMAP1(u8, readb, const volatile)
|
||||
REMAP1(u16, readw, const volatile)
|
||||
REMAP1(u32, readl, const volatile)
|
||||
@ -307,7 +307,7 @@ static inline int __is_mmio(const volatile void __iomem *addr)
|
||||
*/
|
||||
|
||||
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
|
||||
extern inline unsigned int ioread8(void __iomem *addr)
|
||||
extern inline unsigned int ioread8(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
@ -316,7 +316,7 @@ extern inline unsigned int ioread8(void __iomem *addr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
extern inline unsigned int ioread16(void __iomem *addr)
|
||||
extern inline unsigned int ioread16(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
@ -359,7 +359,7 @@ extern inline void outw(u16 b, unsigned long port)
|
||||
#endif
|
||||
|
||||
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
|
||||
extern inline unsigned int ioread32(void __iomem *addr)
|
||||
extern inline unsigned int ioread32(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
|
@ -7,15 +7,15 @@
|
||||
|
||||
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
|
||||
__EXTERN_INLINE unsigned int
|
||||
IO_CONCAT(__IO_PREFIX,ioread8)(void __iomem *a)
|
||||
IO_CONCAT(__IO_PREFIX,ioread8)(const void __iomem *a)
|
||||
{
|
||||
return __kernel_ldbu(*(volatile u8 __force *)a);
|
||||
return __kernel_ldbu(*(const volatile u8 __force *)a);
|
||||
}
|
||||
|
||||
__EXTERN_INLINE unsigned int
|
||||
IO_CONCAT(__IO_PREFIX,ioread16)(void __iomem *a)
|
||||
IO_CONCAT(__IO_PREFIX,ioread16)(const void __iomem *a)
|
||||
{
|
||||
return __kernel_ldwu(*(volatile u16 __force *)a);
|
||||
return __kernel_ldwu(*(const volatile u16 __force *)a);
|
||||
}
|
||||
|
||||
__EXTERN_INLINE void
|
||||
@ -33,9 +33,9 @@ IO_CONCAT(__IO_PREFIX,iowrite16)(u16 b, void __iomem *a)
|
||||
|
||||
#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
|
||||
__EXTERN_INLINE unsigned int
|
||||
IO_CONCAT(__IO_PREFIX,ioread32)(void __iomem *a)
|
||||
IO_CONCAT(__IO_PREFIX,ioread32)(const void __iomem *a)
|
||||
{
|
||||
return *(volatile u32 __force *)a;
|
||||
return *(const volatile u32 __force *)a;
|
||||
}
|
||||
|
||||
__EXTERN_INLINE void
|
||||
@ -73,14 +73,14 @@ IO_CONCAT(__IO_PREFIX,writew)(u16 b, volatile void __iomem *a)
|
||||
__EXTERN_INLINE u8
|
||||
IO_CONCAT(__IO_PREFIX,readb)(const volatile void __iomem *a)
|
||||
{
|
||||
void __iomem *addr = (void __iomem *)a;
|
||||
const void __iomem *addr = (const void __iomem *)a;
|
||||
return IO_CONCAT(__IO_PREFIX,ioread8)(addr);
|
||||
}
|
||||
|
||||
__EXTERN_INLINE u16
|
||||
IO_CONCAT(__IO_PREFIX,readw)(const volatile void __iomem *a)
|
||||
{
|
||||
void __iomem *addr = (void __iomem *)a;
|
||||
const void __iomem *addr = (const void __iomem *)a;
|
||||
return IO_CONCAT(__IO_PREFIX,ioread16)(addr);
|
||||
}
|
||||
|
||||
|
@ -305,7 +305,7 @@ __EXTERN_INLINE int jensen_is_mmio(const volatile void __iomem *addr)
|
||||
that it doesn't make sense to merge them. */
|
||||
|
||||
#define IOPORT(OS, NS) \
|
||||
__EXTERN_INLINE unsigned int jensen_ioread##NS(void __iomem *xaddr) \
|
||||
__EXTERN_INLINE unsigned int jensen_ioread##NS(const void __iomem *xaddr) \
|
||||
{ \
|
||||
if (jensen_is_mmio(xaddr)) \
|
||||
return jensen_read##OS(xaddr - 0x100000000ul); \
|
||||
|
@ -46,9 +46,9 @@ struct alpha_machine_vector
|
||||
void (*mv_pci_tbi)(struct pci_controller *hose,
|
||||
dma_addr_t start, dma_addr_t end);
|
||||
|
||||
unsigned int (*mv_ioread8)(void __iomem *);
|
||||
unsigned int (*mv_ioread16)(void __iomem *);
|
||||
unsigned int (*mv_ioread32)(void __iomem *);
|
||||
unsigned int (*mv_ioread8)(const void __iomem *);
|
||||
unsigned int (*mv_ioread16)(const void __iomem *);
|
||||
unsigned int (*mv_ioread32)(const void __iomem *);
|
||||
|
||||
void (*mv_iowrite8)(u8, void __iomem *);
|
||||
void (*mv_iowrite16)(u16, void __iomem *);
|
||||
|
@ -806,7 +806,7 @@ void __iomem *marvel_ioportmap (unsigned long addr)
|
||||
}
|
||||
|
||||
unsigned int
|
||||
marvel_ioread8(void __iomem *xaddr)
|
||||
marvel_ioread8(const void __iomem *xaddr)
|
||||
{
|
||||
unsigned long addr = (unsigned long) xaddr;
|
||||
if (__marvel_is_port_kbd(addr))
|
||||
|
@ -14,7 +14,7 @@
|
||||
"generic", which bumps through the machine vector. */
|
||||
|
||||
unsigned int
|
||||
ioread8(void __iomem *addr)
|
||||
ioread8(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
@ -23,7 +23,7 @@ ioread8(void __iomem *addr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int ioread16(void __iomem *addr)
|
||||
unsigned int ioread16(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
@ -32,7 +32,7 @@ unsigned int ioread16(void __iomem *addr)
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned int ioread32(void __iomem *addr)
|
||||
unsigned int ioread32(const void __iomem *addr)
|
||||
{
|
||||
unsigned int ret;
|
||||
mb();
|
||||
@ -257,7 +257,7 @@ EXPORT_SYMBOL(readq_relaxed);
|
||||
/*
|
||||
* Read COUNT 8-bit bytes from port PORT into memory starting at SRC.
|
||||
*/
|
||||
void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
|
||||
void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
|
||||
{
|
||||
while ((unsigned long)dst & 0x3) {
|
||||
if (!count)
|
||||
@ -300,7 +300,7 @@ EXPORT_SYMBOL(insb);
|
||||
* the interfaces seems to be slow: just using the inlined version
|
||||
* of the inw() breaks things.
|
||||
*/
|
||||
void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
|
||||
void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
|
||||
{
|
||||
if (unlikely((unsigned long)dst & 0x3)) {
|
||||
if (!count)
|
||||
@ -340,7 +340,7 @@ EXPORT_SYMBOL(insw);
|
||||
* but the interfaces seems to be slow: just using the inlined version
|
||||
* of the inl() breaks things.
|
||||
*/
|
||||
void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
|
||||
void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
|
||||
{
|
||||
if (unlikely((unsigned long)dst & 0x3)) {
|
||||
while (count--) {
|
||||
|
@ -249,7 +249,7 @@
|
||||
316 common mlockall sys_mlockall
|
||||
317 common munlockall sys_munlockall
|
||||
318 common sysinfo sys_sysinfo
|
||||
319 common _sysctl sys_sysctl
|
||||
319 common _sysctl sys_ni_syscall
|
||||
# 320 was sys_idle
|
||||
321 common oldumount sys_oldumount
|
||||
322 common swapon sys_swapon
|
||||
|
@ -3,7 +3,6 @@ CONFIG_LOCALVERSION="gum"
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_SYSFS_DEPRECATED_V2=y
|
||||
CONFIG_EXPERT=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_EPOLL is not set
|
||||
# CONFIG_SHMEM is not set
|
||||
# CONFIG_VM_EVENT_COUNTERS is not set
|
||||
|
@ -162,7 +162,7 @@
|
||||
146 common writev sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl
|
||||
149 common _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -308,8 +308,8 @@ __SYSCALL(__NR_writev, compat_sys_writev)
|
||||
__SYSCALL(__NR_getsid, sys_getsid)
|
||||
#define __NR_fdatasync 148
|
||||
__SYSCALL(__NR_fdatasync, sys_fdatasync)
|
||||
#define __NR__sysctl 149
|
||||
__SYSCALL(__NR__sysctl, compat_sys_sysctl)
|
||||
/* 149 was sys_sysctl */
|
||||
__SYSCALL(149, sys_ni_syscall)
|
||||
#define __NR_mlock 150
|
||||
__SYSCALL(__NR_mlock, sys_mlock)
|
||||
#define __NR_munlock 151
|
||||
|
@ -135,7 +135,7 @@
|
||||
123 common writev sys_writev
|
||||
124 common pread64 sys_pread64
|
||||
125 common pwrite64 sys_pwrite64
|
||||
126 common _sysctl sys_sysctl
|
||||
126 common _sysctl sys_ni_syscall
|
||||
127 common mmap sys_mmap
|
||||
128 common munmap sys_munmap
|
||||
129 common mlock sys_mlock
|
||||
|
@ -156,7 +156,7 @@
|
||||
146 common writev sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl
|
||||
149 common _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -156,7 +156,7 @@
|
||||
146 common writev sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl
|
||||
149 common _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -17,7 +17,6 @@ CONFIG_CGROUP_CPUACCT=y
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_USER_NS=y
|
||||
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
|
||||
CONFIG_SYSCTL_SYSCALL=y
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
CONFIG_EMBEDDED=y
|
||||
# CONFIG_VM_EVENT_COUNTERS is not set
|
||||
|
@ -159,7 +159,7 @@
|
||||
149 n32 munlockall sys_munlockall
|
||||
150 n32 vhangup sys_vhangup
|
||||
151 n32 pivot_root sys_pivot_root
|
||||
152 n32 _sysctl compat_sys_sysctl
|
||||
152 n32 _sysctl sys_ni_syscall
|
||||
153 n32 prctl sys_prctl
|
||||
154 n32 adjtimex sys_adjtimex_time32
|
||||
155 n32 setrlimit compat_sys_setrlimit
|
||||
|
@ -159,7 +159,7 @@
|
||||
149 n64 munlockall sys_munlockall
|
||||
150 n64 vhangup sys_vhangup
|
||||
151 n64 pivot_root sys_pivot_root
|
||||
152 n64 _sysctl sys_sysctl
|
||||
152 n64 _sysctl sys_ni_syscall
|
||||
153 n64 prctl sys_prctl
|
||||
154 n64 adjtimex sys_adjtimex
|
||||
155 n64 setrlimit sys_setrlimit
|
||||
|
@ -164,7 +164,7 @@
|
||||
150 o32 unused150 sys_ni_syscall
|
||||
151 o32 getsid sys_getsid
|
||||
152 o32 fdatasync sys_fdatasync
|
||||
153 o32 _sysctl sys_sysctl compat_sys_sysctl
|
||||
153 o32 _sysctl sys_ni_syscall
|
||||
154 o32 mlock sys_mlock
|
||||
155 o32 munlock sys_munlock
|
||||
156 o32 mlockall sys_mlockall
|
||||
|
@ -303,8 +303,8 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
|
||||
#define ioread64be ioread64be
|
||||
#define iowrite64 iowrite64
|
||||
#define iowrite64be iowrite64be
|
||||
extern u64 ioread64(void __iomem *addr);
|
||||
extern u64 ioread64be(void __iomem *addr);
|
||||
extern u64 ioread64(const void __iomem *addr);
|
||||
extern u64 ioread64be(const void __iomem *addr);
|
||||
extern void iowrite64(u64 val, void __iomem *addr);
|
||||
extern void iowrite64be(u64 val, void __iomem *addr);
|
||||
|
||||
|
@ -163,7 +163,7 @@
|
||||
146 common writev sys_writev compat_sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl compat_sys_sysctl
|
||||
149 common _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -43,13 +43,13 @@
|
||||
#endif
|
||||
|
||||
struct iomap_ops {
|
||||
unsigned int (*read8)(void __iomem *);
|
||||
unsigned int (*read16)(void __iomem *);
|
||||
unsigned int (*read16be)(void __iomem *);
|
||||
unsigned int (*read32)(void __iomem *);
|
||||
unsigned int (*read32be)(void __iomem *);
|
||||
u64 (*read64)(void __iomem *);
|
||||
u64 (*read64be)(void __iomem *);
|
||||
unsigned int (*read8)(const void __iomem *);
|
||||
unsigned int (*read16)(const void __iomem *);
|
||||
unsigned int (*read16be)(const void __iomem *);
|
||||
unsigned int (*read32)(const void __iomem *);
|
||||
unsigned int (*read32be)(const void __iomem *);
|
||||
u64 (*read64)(const void __iomem *);
|
||||
u64 (*read64be)(const void __iomem *);
|
||||
void (*write8)(u8, void __iomem *);
|
||||
void (*write16)(u16, void __iomem *);
|
||||
void (*write16be)(u16, void __iomem *);
|
||||
@ -57,9 +57,9 @@ struct iomap_ops {
|
||||
void (*write32be)(u32, void __iomem *);
|
||||
void (*write64)(u64, void __iomem *);
|
||||
void (*write64be)(u64, void __iomem *);
|
||||
void (*read8r)(void __iomem *, void *, unsigned long);
|
||||
void (*read16r)(void __iomem *, void *, unsigned long);
|
||||
void (*read32r)(void __iomem *, void *, unsigned long);
|
||||
void (*read8r)(const void __iomem *, void *, unsigned long);
|
||||
void (*read16r)(const void __iomem *, void *, unsigned long);
|
||||
void (*read32r)(const void __iomem *, void *, unsigned long);
|
||||
void (*write8r)(void __iomem *, const void *, unsigned long);
|
||||
void (*write16r)(void __iomem *, const void *, unsigned long);
|
||||
void (*write32r)(void __iomem *, const void *, unsigned long);
|
||||
@ -69,17 +69,17 @@ struct iomap_ops {
|
||||
|
||||
#define ADDR2PORT(addr) ((unsigned long __force)(addr) & 0xffffff)
|
||||
|
||||
static unsigned int ioport_read8(void __iomem *addr)
|
||||
static unsigned int ioport_read8(const void __iomem *addr)
|
||||
{
|
||||
return inb(ADDR2PORT(addr));
|
||||
}
|
||||
|
||||
static unsigned int ioport_read16(void __iomem *addr)
|
||||
static unsigned int ioport_read16(const void __iomem *addr)
|
||||
{
|
||||
return inw(ADDR2PORT(addr));
|
||||
}
|
||||
|
||||
static unsigned int ioport_read32(void __iomem *addr)
|
||||
static unsigned int ioport_read32(const void __iomem *addr)
|
||||
{
|
||||
return inl(ADDR2PORT(addr));
|
||||
}
|
||||
@ -99,17 +99,17 @@ static void ioport_write32(u32 datum, void __iomem *addr)
|
||||
outl(datum, ADDR2PORT(addr));
|
||||
}
|
||||
|
||||
static void ioport_read8r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void ioport_read8r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
insb(ADDR2PORT(addr), dst, count);
|
||||
}
|
||||
|
||||
static void ioport_read16r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void ioport_read16r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
insw(ADDR2PORT(addr), dst, count);
|
||||
}
|
||||
|
||||
static void ioport_read32r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void ioport_read32r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
insl(ADDR2PORT(addr), dst, count);
|
||||
}
|
||||
@ -150,37 +150,37 @@ static const struct iomap_ops ioport_ops = {
|
||||
|
||||
/* Legacy I/O memory ops */
|
||||
|
||||
static unsigned int iomem_read8(void __iomem *addr)
|
||||
static unsigned int iomem_read8(const void __iomem *addr)
|
||||
{
|
||||
return readb(addr);
|
||||
}
|
||||
|
||||
static unsigned int iomem_read16(void __iomem *addr)
|
||||
static unsigned int iomem_read16(const void __iomem *addr)
|
||||
{
|
||||
return readw(addr);
|
||||
}
|
||||
|
||||
static unsigned int iomem_read16be(void __iomem *addr)
|
||||
static unsigned int iomem_read16be(const void __iomem *addr)
|
||||
{
|
||||
return __raw_readw(addr);
|
||||
}
|
||||
|
||||
static unsigned int iomem_read32(void __iomem *addr)
|
||||
static unsigned int iomem_read32(const void __iomem *addr)
|
||||
{
|
||||
return readl(addr);
|
||||
}
|
||||
|
||||
static unsigned int iomem_read32be(void __iomem *addr)
|
||||
static unsigned int iomem_read32be(const void __iomem *addr)
|
||||
{
|
||||
return __raw_readl(addr);
|
||||
}
|
||||
|
||||
static u64 iomem_read64(void __iomem *addr)
|
||||
static u64 iomem_read64(const void __iomem *addr)
|
||||
{
|
||||
return readq(addr);
|
||||
}
|
||||
|
||||
static u64 iomem_read64be(void __iomem *addr)
|
||||
static u64 iomem_read64be(const void __iomem *addr)
|
||||
{
|
||||
return __raw_readq(addr);
|
||||
}
|
||||
@ -220,7 +220,7 @@ static void iomem_write64be(u64 datum, void __iomem *addr)
|
||||
__raw_writel(datum, addr);
|
||||
}
|
||||
|
||||
static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void iomem_read8r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
while (count--) {
|
||||
*(u8 *)dst = __raw_readb(addr);
|
||||
@ -228,7 +228,7 @@ static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count)
|
||||
}
|
||||
}
|
||||
|
||||
static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void iomem_read16r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
while (count--) {
|
||||
*(u16 *)dst = __raw_readw(addr);
|
||||
@ -236,7 +236,7 @@ static void iomem_read16r(void __iomem *addr, void *dst, unsigned long count)
|
||||
}
|
||||
}
|
||||
|
||||
static void iomem_read32r(void __iomem *addr, void *dst, unsigned long count)
|
||||
static void iomem_read32r(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
while (count--) {
|
||||
*(u32 *)dst = __raw_readl(addr);
|
||||
@ -297,49 +297,49 @@ static const struct iomap_ops *iomap_ops[8] = {
|
||||
};
|
||||
|
||||
|
||||
unsigned int ioread8(void __iomem *addr)
|
||||
unsigned int ioread8(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read8(addr);
|
||||
return *((u8 *)addr);
|
||||
}
|
||||
|
||||
unsigned int ioread16(void __iomem *addr)
|
||||
unsigned int ioread16(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read16(addr);
|
||||
return le16_to_cpup((u16 *)addr);
|
||||
}
|
||||
|
||||
unsigned int ioread16be(void __iomem *addr)
|
||||
unsigned int ioread16be(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read16be(addr);
|
||||
return *((u16 *)addr);
|
||||
}
|
||||
|
||||
unsigned int ioread32(void __iomem *addr)
|
||||
unsigned int ioread32(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read32(addr);
|
||||
return le32_to_cpup((u32 *)addr);
|
||||
}
|
||||
|
||||
unsigned int ioread32be(void __iomem *addr)
|
||||
unsigned int ioread32be(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read32be(addr);
|
||||
return *((u32 *)addr);
|
||||
}
|
||||
|
||||
u64 ioread64(void __iomem *addr)
|
||||
u64 ioread64(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr);
|
||||
return le64_to_cpup((u64 *)addr);
|
||||
}
|
||||
|
||||
u64 ioread64be(void __iomem *addr)
|
||||
u64 ioread64be(const void __iomem *addr)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr)))
|
||||
return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr);
|
||||
@ -411,7 +411,7 @@ void iowrite64be(u64 datum, void __iomem *addr)
|
||||
|
||||
/* Repeating interfaces */
|
||||
|
||||
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr))) {
|
||||
iomap_ops[ADDR_TO_REGION(addr)]->read8r(addr, dst, count);
|
||||
@ -423,7 +423,7 @@ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
}
|
||||
}
|
||||
|
||||
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr))) {
|
||||
iomap_ops[ADDR_TO_REGION(addr)]->read16r(addr, dst, count);
|
||||
@ -435,7 +435,7 @@ void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
}
|
||||
}
|
||||
|
||||
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
if (unlikely(INDIRECT_ADDR(addr))) {
|
||||
iomap_ops[ADDR_TO_REGION(addr)]->read32r(addr, dst, count);
|
||||
|
@ -15,23 +15,23 @@
|
||||
* Here comes the ppc64 implementation of the IOMAP
|
||||
* interfaces.
|
||||
*/
|
||||
unsigned int ioread8(void __iomem *addr)
|
||||
unsigned int ioread8(const void __iomem *addr)
|
||||
{
|
||||
return readb(addr);
|
||||
}
|
||||
unsigned int ioread16(void __iomem *addr)
|
||||
unsigned int ioread16(const void __iomem *addr)
|
||||
{
|
||||
return readw(addr);
|
||||
}
|
||||
unsigned int ioread16be(void __iomem *addr)
|
||||
unsigned int ioread16be(const void __iomem *addr)
|
||||
{
|
||||
return readw_be(addr);
|
||||
}
|
||||
unsigned int ioread32(void __iomem *addr)
|
||||
unsigned int ioread32(const void __iomem *addr)
|
||||
{
|
||||
return readl(addr);
|
||||
}
|
||||
unsigned int ioread32be(void __iomem *addr)
|
||||
unsigned int ioread32be(const void __iomem *addr)
|
||||
{
|
||||
return readl_be(addr);
|
||||
}
|
||||
@ -41,27 +41,27 @@ EXPORT_SYMBOL(ioread16be);
|
||||
EXPORT_SYMBOL(ioread32);
|
||||
EXPORT_SYMBOL(ioread32be);
|
||||
#ifdef __powerpc64__
|
||||
u64 ioread64(void __iomem *addr)
|
||||
u64 ioread64(const void __iomem *addr)
|
||||
{
|
||||
return readq(addr);
|
||||
}
|
||||
u64 ioread64_lo_hi(void __iomem *addr)
|
||||
u64 ioread64_lo_hi(const void __iomem *addr)
|
||||
{
|
||||
return readq(addr);
|
||||
}
|
||||
u64 ioread64_hi_lo(void __iomem *addr)
|
||||
u64 ioread64_hi_lo(const void __iomem *addr)
|
||||
{
|
||||
return readq(addr);
|
||||
}
|
||||
u64 ioread64be(void __iomem *addr)
|
||||
u64 ioread64be(const void __iomem *addr)
|
||||
{
|
||||
return readq_be(addr);
|
||||
}
|
||||
u64 ioread64be_lo_hi(void __iomem *addr)
|
||||
u64 ioread64be_lo_hi(const void __iomem *addr)
|
||||
{
|
||||
return readq_be(addr);
|
||||
}
|
||||
u64 ioread64be_hi_lo(void __iomem *addr)
|
||||
u64 ioread64be_hi_lo(const void __iomem *addr)
|
||||
{
|
||||
return readq_be(addr);
|
||||
}
|
||||
@ -139,15 +139,15 @@ EXPORT_SYMBOL(iowrite64be_hi_lo);
|
||||
* FIXME! We could make these do EEH handling if we really
|
||||
* wanted. Not clear if we do.
|
||||
*/
|
||||
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
readsb(addr, dst, count);
|
||||
}
|
||||
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
readsw(addr, dst, count);
|
||||
}
|
||||
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
readsl(addr, dst, count);
|
||||
}
|
||||
|
@ -197,7 +197,7 @@
|
||||
146 common writev sys_writev compat_sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 nospu _sysctl sys_sysctl compat_sys_sysctl
|
||||
149 nospu _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -138,7 +138,7 @@
|
||||
146 common writev sys_writev compat_sys_writev
|
||||
147 common getsid sys_getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl compat_sys_sysctl
|
||||
149 common _sysctl - -
|
||||
150 common mlock sys_mlock sys_mlock
|
||||
151 common munlock sys_munlock sys_munlock
|
||||
152 common mlockall sys_mlockall sys_mlockall
|
||||
|
@ -1,7 +1,6 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_UTS_NS=y
|
||||
CONFIG_IPC_NS=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -3,7 +3,6 @@ CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
# CONFIG_BLK_DEV_BSG is not set
|
||||
CONFIG_CPU_SUBTYPE_SH7709=y
|
||||
|
@ -1,6 +1,5 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_KALLSYMS_EXTRA_PASS=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -1,6 +1,5 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_KALLSYMS_EXTRA_PASS=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -2,7 +2,6 @@ CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
# CONFIG_BLK_DEV_BSG is not set
|
||||
CONFIG_CPU_SUBTYPE_SH4_202=y
|
||||
|
@ -4,7 +4,6 @@ CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_FUTEX is not set
|
||||
# CONFIG_EPOLL is not set
|
||||
CONFIG_SLAB=y
|
||||
|
@ -7,7 +7,6 @@ CONFIG_RCU_TRACE=y
|
||||
CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -1,7 +1,6 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -1,7 +1,6 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -18,7 +18,6 @@ CONFIG_USER_NS=y
|
||||
CONFIG_PID_NS=y
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_UID16 is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
# CONFIG_ELF_CORE is not set
|
||||
# CONFIG_COMPAT_BRK is not set
|
||||
|
@ -2,7 +2,6 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_FUTEX is not set
|
||||
# CONFIG_EPOLL is not set
|
||||
# CONFIG_SHMEM is not set
|
||||
|
@ -1,7 +1,6 @@
|
||||
# CONFIG_LOCALVERSION_AUTO is not set
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_UID16 is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_KALLSYMS is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
# CONFIG_ELF_CORE is not set
|
||||
|
@ -2,7 +2,6 @@
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_KALLSYMS is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
CONFIG_SLAB=y
|
||||
|
@ -5,7 +5,6 @@ CONFIG_IKCONFIG=y
|
||||
CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -3,7 +3,6 @@ CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -1,7 +1,6 @@
|
||||
# CONFIG_SWAP is not set
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
CONFIG_SLAB=y
|
||||
# CONFIG_BLK_DEV_BSG is not set
|
||||
|
@ -3,7 +3,6 @@ CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=m
|
||||
|
@ -2,7 +2,6 @@
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_FUTEX is not set
|
||||
# CONFIG_EPOLL is not set
|
||||
# CONFIG_SHMEM is not set
|
||||
|
@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y
|
||||
CONFIG_TASK_IO_ACCOUNTING=y
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_KALLSYMS_ALL=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
|
@ -5,7 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
|
||||
CONFIG_NAMESPACES=y
|
||||
CONFIG_UTS_NS=y
|
||||
CONFIG_IPC_NS=y
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_PROFILING=y
|
||||
CONFIG_OPROFILE=y
|
||||
|
@ -1,7 +1,6 @@
|
||||
# CONFIG_SWAP is not set
|
||||
CONFIG_LOG_BUF_SHIFT=14
|
||||
# CONFIG_UID16 is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
# CONFIG_KALLSYMS is not set
|
||||
# CONFIG_HOTPLUG is not set
|
||||
# CONFIG_BUG is not set
|
||||
|
@ -6,7 +6,6 @@ CONFIG_IKCONFIG_PROC=y
|
||||
CONFIG_LOG_BUF_SHIFT=16
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
|
||||
# CONFIG_SYSCTL_SYSCALL is not set
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_MODULES=y
|
||||
CONFIG_MODULE_UNLOAD=y
|
||||
|
@ -28,32 +28,6 @@ static inline char *strcpy(char *__dest, const char *__src)
|
||||
return __xdest;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_STRNCPY
|
||||
static inline char *strncpy(char *__dest, const char *__src, size_t __n)
|
||||
{
|
||||
register char *__xdest = __dest;
|
||||
unsigned long __dummy;
|
||||
|
||||
if (__n == 0)
|
||||
return __xdest;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1:\n"
|
||||
"mov.b @%1+, %2\n\t"
|
||||
"mov.b %2, @%0\n\t"
|
||||
"cmp/eq #0, %2\n\t"
|
||||
"bt/s 2f\n\t"
|
||||
" cmp/eq %5,%1\n\t"
|
||||
"bf/s 1b\n\t"
|
||||
" add #1, %0\n"
|
||||
"2:"
|
||||
: "=r" (__dest), "=r" (__src), "=&z" (__dummy)
|
||||
: "0" (__dest), "1" (__src), "r" (__src+__n)
|
||||
: "memory", "t");
|
||||
|
||||
return __xdest;
|
||||
}
|
||||
|
||||
#define __HAVE_ARCH_STRCMP
|
||||
static inline int strcmp(const char *__cs, const char *__ct)
|
||||
{
|
||||
|
@ -8,31 +8,31 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
unsigned int ioread8(void __iomem *addr)
|
||||
unsigned int ioread8(const void __iomem *addr)
|
||||
{
|
||||
return readb(addr);
|
||||
}
|
||||
EXPORT_SYMBOL(ioread8);
|
||||
|
||||
unsigned int ioread16(void __iomem *addr)
|
||||
unsigned int ioread16(const void __iomem *addr)
|
||||
{
|
||||
return readw(addr);
|
||||
}
|
||||
EXPORT_SYMBOL(ioread16);
|
||||
|
||||
unsigned int ioread16be(void __iomem *addr)
|
||||
unsigned int ioread16be(const void __iomem *addr)
|
||||
{
|
||||
return be16_to_cpu(__raw_readw(addr));
|
||||
}
|
||||
EXPORT_SYMBOL(ioread16be);
|
||||
|
||||
unsigned int ioread32(void __iomem *addr)
|
||||
unsigned int ioread32(const void __iomem *addr)
|
||||
{
|
||||
return readl(addr);
|
||||
}
|
||||
EXPORT_SYMBOL(ioread32);
|
||||
|
||||
unsigned int ioread32be(void __iomem *addr)
|
||||
unsigned int ioread32be(const void __iomem *addr)
|
||||
{
|
||||
return be32_to_cpu(__raw_readl(addr));
|
||||
}
|
||||
@ -74,7 +74,7 @@ EXPORT_SYMBOL(iowrite32be);
|
||||
* convert to CPU byte order. We write in "IO byte
|
||||
* order" (we also don't have IO barriers).
|
||||
*/
|
||||
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
|
||||
static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
|
||||
{
|
||||
while (--count >= 0) {
|
||||
u8 data = __raw_readb(addr);
|
||||
@ -83,7 +83,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
|
||||
static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
|
||||
{
|
||||
while (--count >= 0) {
|
||||
u16 data = __raw_readw(addr);
|
||||
@ -92,7 +92,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
|
||||
}
|
||||
}
|
||||
|
||||
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
|
||||
static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
|
||||
{
|
||||
while (--count >= 0) {
|
||||
u32 data = __raw_readl(addr);
|
||||
@ -125,19 +125,19 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
|
||||
}
|
||||
}
|
||||
|
||||
void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
mmio_insb(addr, dst, count);
|
||||
}
|
||||
EXPORT_SYMBOL(ioread8_rep);
|
||||
|
||||
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
mmio_insw(addr, dst, count);
|
||||
}
|
||||
EXPORT_SYMBOL(ioread16_rep);
|
||||
|
||||
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
|
||||
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
|
||||
{
|
||||
mmio_insl(addr, dst, count);
|
||||
}
|
||||
|
@ -156,7 +156,7 @@
|
||||
146 common writev sys_writev
|
||||
147 common getsid sys_getsid
|
||||
148 common fdatasync sys_fdatasync
|
||||
149 common _sysctl sys_sysctl
|
||||
149 common _sysctl sys_ni_syscall
|
||||
150 common mlock sys_mlock
|
||||
151 common munlock sys_munlock
|
||||
152 common mlockall sys_mlockall
|
||||
|
@ -300,7 +300,7 @@
|
||||
249 64 nanosleep sys_nanosleep
|
||||
250 32 mremap sys_mremap
|
||||
250 64 mremap sys_64_mremap
|
||||
251 common _sysctl sys_sysctl compat_sys_sysctl
|
||||
251 common _sysctl sys_ni_syscall
|
||||
252 common getsid sys_getsid
|
||||
253 common fdatasync sys_fdatasync
|
||||
254 32 nfsservctl sys_ni_syscall sys_nis_syscall
|
||||
|
@ -160,7 +160,7 @@
|
||||
146 i386 writev sys_writev compat_sys_writev
|
||||
147 i386 getsid sys_getsid
|
||||
148 i386 fdatasync sys_fdatasync
|
||||
149 i386 _sysctl sys_sysctl compat_sys_sysctl
|
||||
149 i386 _sysctl sys_ni_syscall
|
||||
150 i386 mlock sys_mlock
|
||||
151 i386 munlock sys_munlock
|
||||
152 i386 mlockall sys_mlockall
|
||||
|
@ -164,7 +164,7 @@
|
||||
153 common vhangup sys_vhangup
|
||||
154 common modify_ldt sys_modify_ldt
|
||||
155 common pivot_root sys_pivot_root
|
||||
156 64 _sysctl sys_sysctl
|
||||
156 64 _sysctl sys_ni_syscall
|
||||
157 common prctl sys_prctl
|
||||
158 common arch_prctl sys_arch_prctl
|
||||
159 common adjtimex sys_adjtimex
|
||||
|
@ -222,7 +222,7 @@
|
||||
204 common quotactl sys_quotactl
|
||||
# 205 was old nfsservctl
|
||||
205 common nfsservctl sys_ni_syscall
|
||||
206 common _sysctl sys_sysctl
|
||||
206 common _sysctl sys_ni_syscall
|
||||
207 common bdflush sys_bdflush
|
||||
208 common uname sys_newuname
|
||||
209 common sysinfo sys_sysinfo
|
||||
|
@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs)
|
||||
|
||||
/* read last_rx_curr from register once */
|
||||
pdcs->last_rx_curr =
|
||||
(ioread32(&pdcs->rxregs_64->status0) &
|
||||
(ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
|
||||
CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
|
||||
|
||||
do {
|
||||
|
@ -150,17 +150,17 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
|
||||
void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam);
|
||||
void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2);
|
||||
|
||||
static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr)
|
||||
static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, const u8 __iomem *addr)
|
||||
{
|
||||
return ioread8(addr);
|
||||
}
|
||||
|
||||
static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, __le16 __iomem *addr)
|
||||
static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, const __le16 __iomem *addr)
|
||||
{
|
||||
return ioread16(addr);
|
||||
}
|
||||
|
||||
static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, __le32 __iomem *addr)
|
||||
static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, const __le32 __iomem *addr)
|
||||
{
|
||||
return ioread32(addr);
|
||||
}
|
||||
|
@ -1205,7 +1205,7 @@ int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
|
||||
ndev->peer_reg->spad);
|
||||
}
|
||||
|
||||
static u64 xeon_db_ioread(void __iomem *mmio)
|
||||
static u64 xeon_db_ioread(const void __iomem *mmio)
|
||||
{
|
||||
return (u64)ioread16(mmio);
|
||||
}
|
||||
|
@ -91,7 +91,7 @@
|
||||
#define GEN3_DB_TOTAL_SHIFT 33
|
||||
#define GEN3_SPAD_COUNT 16
|
||||
|
||||
static inline u64 gen3_db_ioread(void __iomem *mmio)
|
||||
static inline u64 gen3_db_ioread(const void __iomem *mmio)
|
||||
{
|
||||
return ioread64(mmio);
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ struct intel_ntb_dev;
|
||||
struct intel_ntb_reg {
|
||||
int (*poll_link)(struct intel_ntb_dev *ndev);
|
||||
int (*link_is_up)(struct intel_ntb_dev *ndev);
|
||||
u64 (*db_ioread)(void __iomem *mmio);
|
||||
u64 (*db_ioread)(const void __iomem *mmio);
|
||||
void (*db_iowrite)(u64 db_bits, void __iomem *mmio);
|
||||
unsigned long ntb_ctl;
|
||||
resource_size_t db_size;
|
||||
|
@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
|
||||
{
|
||||
struct btt *btt = bdev->bd_disk->private_data;
|
||||
int rc;
|
||||
unsigned int len;
|
||||
|
||||
len = hpage_nr_pages(page) * PAGE_SIZE;
|
||||
rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector);
|
||||
rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
|
||||
if (rc == 0)
|
||||
page_endio(page, op_is_write(op), 0);
|
||||
|
||||
|
@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
|
||||
blk_status_t rc;
|
||||
|
||||
if (op_is_write(op))
|
||||
rc = pmem_do_write(pmem, page, 0, sector,
|
||||
hpage_nr_pages(page) * PAGE_SIZE);
|
||||
rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
|
||||
else
|
||||
rc = pmem_do_read(pmem, page, 0, sector,
|
||||
hpage_nr_pages(page) * PAGE_SIZE);
|
||||
rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
|
||||
/*
|
||||
* The ->rw_page interface is subtle and tricky. The core
|
||||
* retries on any error, so we can only invoke page_endio() in
|
||||
|
@@ -36,21 +36,6 @@ static void sh_clk_write(int value, struct clk *clk)
    iowrite32(value, clk->mapped_reg);
}

static unsigned int r8(const void __iomem *addr)
{
    return ioread8(addr);
}

static unsigned int r16(const void __iomem *addr)
{
    return ioread16(addr);
}

static unsigned int r32(const void __iomem *addr)
{
    return ioread32(addr);
}

static int sh_clk_mstp_enable(struct clk *clk)
{
    sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
@@ -61,11 +46,11 @@ static int sh_clk_mstp_enable(struct clk *clk)
        (phys_addr_t)clk->enable_reg + clk->mapped_reg;

    if (clk->flags & CLK_ENABLE_REG_8BIT)
        read = r8;
        read = ioread8;
    else if (clk->flags & CLK_ENABLE_REG_16BIT)
        read = r16;
        read = ioread16;
    else
        read = r32;
        read = ioread32;

    for (i = 1000;
         (read(mapped_status) & (1 << clk->enable_bit)) && i;
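The r8/r16/r32 wrappers existed only because the clock code stores its read routine in a function pointer whose parameter is `const void __iomem *`, while ioreadX() used to take a non-const pointer; once ioreadX() itself is const-qualified, the wrappers are redundant and the accessors can be assigned directly. A small hedged sketch of that function-pointer compatibility in plain C (the `my_ioread*` names and the flag logic are illustrative only):

```c
#include <stdio.h>

/* Stand-ins for the kernel accessors (illustrative only). */
static unsigned int my_ioread8(const void *addr)  { return *(const unsigned char *)addr; }
static unsigned int my_ioread16(const void *addr) { return *(const unsigned short *)addr; }

int main(void)
{
	/* The clock code keeps one "read" pointer and picks a width at run
	 * time.  Its type requires a const parameter, so an accessor taking
	 * a non-const pointer could only be assigned via a wrapper; a
	 * const-qualified accessor can be assigned directly. */
	unsigned int (*read)(const void *addr);
	unsigned short status = 0x0102;
	int use_8bit = 0;

	read = use_8bit ? my_ioread8 : my_ioread16;
	printf("status = 0x%04x\n", read(&status));
	return 0;
}
```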
@@ -27,16 +27,16 @@
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(u8 __iomem *addr)
static inline u8 vp_ioread8(const u8 __iomem *addr)
{
    return ioread8(addr);
}
static inline u16 vp_ioread16 (__le16 __iomem *addr)
static inline u16 vp_ioread16 (const __le16 __iomem *addr)
{
    return ioread16(addr);
}

static inline u32 vp_ioread32(__le32 __iomem *addr)
static inline u32 vp_ioread32(const __le32 __iomem *addr)
{
    return ioread32(addr);
}
@@ -20,7 +20,7 @@
 * another mount. This situation arises when starting automount(8)
 * or other user space daemon which uses direct mounts or offset
 * mounts (used for autofs lazy mount/umount of nested mount trees),
 * which have been left busy at at service shutdown.
 * which have been left busy at service shutdown.
 */

typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *,
@@ -496,7 +496,7 @@ static int autofs_dev_ioctl_askumount(struct file *fp,
 * located path is the root of a mount we return 1 along with
 * the super magic of the mount or 0 otherwise.
 *
 * In both cases the the device number (as returned by
 * In both cases the device number (as returned by
 * new_encode_dev()) is also returned.
 */
static int autofs_dev_ioctl_ismountpoint(struct file *fp,
@@ -2849,8 +2849,10 @@ static int may_open(const struct path *path, int acc_mode, int flag)
    case S_IFLNK:
        return -ELOOP;
    case S_IFDIR:
        if (acc_mode & (MAY_WRITE | MAY_EXEC))
        if (acc_mode & MAY_WRITE)
            return -EISDIR;
        if (acc_mode & MAY_EXEC)
            return -EACCES;
        break;
    case S_IFBLK:
    case S_IFCHR:
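For clarity, the behaviour change in this hunk: a directory opened with write intent still fails with -EISDIR, while a directory opened for execute now fails with -EACCES instead. A small stand-alone model of that decision follows; the error codes and MAY_* values mirror the kernel's, but this is an illustrative sketch, not kernel code.

```c
#include <errno.h>
#include <stdio.h>

#define MAY_EXEC  0x01
#define MAY_WRITE 0x02

/* Model of the S_IFDIR branch of may_open() after this change. */
static int dir_open_error(int acc_mode)
{
	if (acc_mode & MAY_WRITE)
		return -EISDIR;	/* directories are never writable via open() */
	if (acc_mode & MAY_EXEC)
		return -EACCES;	/* directories cannot be executed */
	return 0;
}

int main(void)
{
	printf("write: %d, exec: %d, read-only: %d\n",
	       dir_open_error(MAY_WRITE), dir_open_error(MAY_EXEC),
	       dir_open_error(0));
	return 0;
}
```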
@@ -26,14 +26,14 @@
 * in the low address range. Architectures for which this is not
 * true can't use this generic implementation.
 */
extern unsigned int ioread8(void __iomem *);
extern unsigned int ioread16(void __iomem *);
extern unsigned int ioread16be(void __iomem *);
extern unsigned int ioread32(void __iomem *);
extern unsigned int ioread32be(void __iomem *);
extern unsigned int ioread8(const void __iomem *);
extern unsigned int ioread16(const void __iomem *);
extern unsigned int ioread16be(const void __iomem *);
extern unsigned int ioread32(const void __iomem *);
extern unsigned int ioread32be(const void __iomem *);
#ifdef CONFIG_64BIT
extern u64 ioread64(void __iomem *);
extern u64 ioread64be(void __iomem *);
extern u64 ioread64(const void __iomem *);
extern u64 ioread64be(const void __iomem *);
#endif

#ifdef readq
@@ -41,10 +41,10 @@ extern u64 ioread64be(void __iomem *);
#define ioread64_hi_lo ioread64_hi_lo
#define ioread64be_lo_hi ioread64be_lo_hi
#define ioread64be_hi_lo ioread64be_hi_lo
extern u64 ioread64_lo_hi(void __iomem *addr);
extern u64 ioread64_hi_lo(void __iomem *addr);
extern u64 ioread64be_lo_hi(void __iomem *addr);
extern u64 ioread64be_hi_lo(void __iomem *addr);
extern u64 ioread64_lo_hi(const void __iomem *addr);
extern u64 ioread64_hi_lo(const void __iomem *addr);
extern u64 ioread64be_lo_hi(const void __iomem *addr);
extern u64 ioread64be_hi_lo(const void __iomem *addr);
#endif

extern void iowrite8(u8, void __iomem *);
@@ -79,9 +79,9 @@ extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
 * memory across multiple ports, use "memcpy_toio()"
 * and friends.
 */
extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);

extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
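With the generic ioreadX() prototypes const-qualified, a caller that only polls a status window can keep that window typed const all the way down. A minimal user-space sketch of such a polling helper, assuming nothing beyond what the declarations above imply (the `fake_ioread32` and `wait_ready` names are made up for the illustration):

```c
#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for ioread32(const void __iomem *). */
static unsigned int fake_ioread32(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* Poll a read-only status "register" until a ready bit appears.
 * Because the accessor takes a pointer-to-const, the status window can
 * be const-qualified without casts. */
static int wait_ready(const volatile uint32_t *status, uint32_t ready_bit)
{
	for (int i = 0; i < 1000; i++) {
		if (fake_ioread32(status) & ready_bit)
			return 0;
	}
	return -1;
}

int main(void)
{
	uint32_t fake_status = 0x1;	/* pretend the device is already ready */
	printf("wait_ready() = %d\n", wait_ready(&fake_status, 0x1));
	return 0;
}
```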
@@ -147,7 +147,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)

#if CONFIG_PGTABLE_LEVELS > 3

#ifndef __HAVE_ARCH_PUD_FREE
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
 * pud_alloc_one - allocate a page for PUD-level page table
 * @mm: the mm_struct of the current context
@@ -394,6 +394,7 @@
 */
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
    . = ALIGN(8); \
    __start_ro_after_init = .; \
    *(.data..ro_after_init) \
    JUMP_TABLE_DATA \
@@ -851,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
                unsigned flags);
asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args);

/* obsolete: fs/readdir.c */
asmlinkage long compat_sys_old_readdir(unsigned int fd,
@@ -258,9 +258,36 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
    else
        return NULL;
}
static inline int hpage_nr_pages(struct page *page)

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
    if (unlikely(PageTransHuge(page)))
        return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
    VM_BUG_ON_PGFLAGS(PageTail(page), page);
    if (PageHead(page))
        return HPAGE_PMD_ORDER;
    return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
    VM_BUG_ON_PGFLAGS(PageTail(page), page);
    if (PageHead(page))
        return HPAGE_PMD_NR;
    return 1;
}
@@ -317,9 +344,21 @@ static inline struct list_head *page_deferred_list(struct page *page)
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline int hpage_nr_pages(struct page *page)
static inline struct page *thp_head(struct page *page)
{
    VM_BUG_ON_PAGE(PageTail(page), page);
    VM_BUG_ON_PGFLAGS(PageTail(page), page);
    return page;
}

static inline unsigned int thp_order(struct page *page)
{
    VM_BUG_ON_PGFLAGS(PageTail(page), page);
    return 0;
}

static inline int thp_nr_pages(struct page *page)
{
    VM_BUG_ON_PGFLAGS(PageTail(page), page);
    return 1;
}

@@ -443,4 +482,15 @@ static inline bool thp_migration_supported(void)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
    return PAGE_SIZE << thp_order(page);
}

#endif /* _LINUX_HUGE_MM_H */
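The new helpers are related by a single order value: thp_nr_pages() is 1 << thp_order() and thp_size() is PAGE_SIZE << thp_order(), which is why callers such as the nvdimm btt/pmem rw_page paths earlier in this diff could replace hpage_nr_pages(page) * PAGE_SIZE with thp_size(page). A toy user-space model of that relationship, assuming x86-64-style 2 MiB huge pages (the constants and function shapes are illustrative, not the kernel implementation):

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HPAGE_PMD_ORDER	9	/* 2 MiB huge pages; illustrative value */

/* Toy model of the helpers: order -> number of pages -> bytes. */
static unsigned int thp_order_demo(int is_huge)     { return is_huge ? HPAGE_PMD_ORDER : 0; }
static unsigned long thp_nr_pages_demo(int is_huge) { return 1UL << thp_order_demo(is_huge); }
static unsigned long thp_size_demo(int is_huge)     { return PAGE_SIZE << thp_order_demo(is_huge); }

int main(void)
{
	/* A block driver's rw_page path used to compute
	 * hpage_nr_pages(page) * PAGE_SIZE; thp_size() expresses the same
	 * quantity directly. */
	for (int huge = 0; huge <= 1; huge++)
		printf("huge=%d: pages=%lu bytes=%lu (pages*PAGE_SIZE=%lu)\n",
		       huge, thp_nr_pages_demo(huge), thp_size_demo(huge),
		       thp_nr_pages_demo(huge) * PAGE_SIZE);
	return 0;
}
```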
@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
|
||||
|
||||
#ifndef ioread64_hi_lo
|
||||
#define ioread64_hi_lo ioread64_hi_lo
|
||||
static inline u64 ioread64_hi_lo(void __iomem *addr)
|
||||
static inline u64 ioread64_hi_lo(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
|
||||
@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
|
||||
|
||||
#ifndef ioread64be_hi_lo
|
||||
#define ioread64be_hi_lo ioread64be_hi_lo
|
||||
static inline u64 ioread64be_hi_lo(void __iomem *addr)
|
||||
static inline u64 ioread64be_hi_lo(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
|
||||
|
@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
|
||||
|
||||
#ifndef ioread64_lo_hi
|
||||
#define ioread64_lo_hi ioread64_lo_hi
|
||||
static inline u64 ioread64_lo_hi(void __iomem *addr)
|
||||
static inline u64 ioread64_lo_hi(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
|
||||
@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
|
||||
|
||||
#ifndef ioread64be_lo_hi
|
||||
#define ioread64be_lo_hi ioread64be_lo_hi
|
||||
static inline u64 ioread64be_lo_hi(void __iomem *addr)
|
||||
static inline u64 ioread64be_lo_hi(const void __iomem *addr)
|
||||
{
|
||||
u32 low, high;
|
||||
|
||||
|
@ -630,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
|
||||
struct mem_cgroup_per_node *mz;
|
||||
|
||||
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
|
||||
return mz->lru_zone_size[zone_idx][lru];
|
||||
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
|
||||
}
|
||||
|
||||
void mem_cgroup_handle_over_high(void);
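The memcontrol change above is typical of the data-race annotations in this series: a field that may be read without the lock protecting its updates is read through READ_ONCE(), which keeps KCSAN quiet and stops the compiler from tearing or re-loading the access. A minimal sketch of the pairing, with generic names rather than the actual call sites:

/* Writer side, normally under the lock that protects the field. */
WRITE_ONCE(stats->nr_items, stats->nr_items + 1);

/* Lockless reader side, as in mem_cgroup_get_zone_lru_size() above. */
nr = READ_ONCE(stats->nr_items);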
@ -24,6 +24,7 @@
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page-flags.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>
#include <linux/overflow.h>
@ -668,11 +669,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
@ -922,12 +918,15 @@ static inline int compound_pincount(struct page *page)
static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
page[1].compound_nr = 1U << order;
}

/* Returns the number of pages in this potentially compound page. */
static inline unsigned long compound_nr(struct page *page)
{
return 1UL << compound_order(page);
if (!PageHead(page))
return 1;
return page[1].compound_nr;
}
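In other words, set_compound_order() now caches the page count alongside the order, so compound_nr() becomes a single load of the second struct page instead of a shift that first has to fetch compound_order(). A small illustration (values only, not new code):

/* Illustration: an order-9 compound page. */
set_compound_order(head, 9); /* stores order = 9 and compound_nr = 512 */
nr = compound_nr(head); /* reads the cached 512 directly */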
/* Returns the number of bytes in this potentially compound page. */
@ -1068,6 +1067,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);

static inline enum zone_type page_zonenum(const struct page *page)
{
ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT);
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

@ -1595,6 +1595,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
extern void pagefault_out_of_memory(void);

#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1))

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in

@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
static __always_inline void add_page_to_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
list_add_tail(&page->lru, &lruvec->lists[lru]);
}

@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
struct lruvec *lruvec, enum lru_list lru)
{
list_del(&page->lru);
update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page));
}

/**
@ -134,6 +134,7 @@ struct page {
unsigned char compound_dtor;
unsigned char compound_order;
atomic_t compound_mapcount;
unsigned int compound_nr; /* 1 << compound_order */
};
struct { /* Second tail page of compound page */
unsigned long _compound_pad_1; /* compound_head */

@ -381,7 +381,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
if (PageHuge(head))
return head;

return head + (index & (hpage_nr_pages(head) - 1));
return head + (index & (thp_nr_pages(head) - 1));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@ -773,7 +773,7 @@ static inline struct page *readahead_page(struct readahead_control *rac)

page = xa_load(&rac->mapping->i_pages, rac->_index);
VM_BUG_ON_PAGE(!PageLocked(page), page);
rac->_batch_count = hpage_nr_pages(page);
rac->_batch_count = thp_nr_pages(page);

return page;
}
@ -796,7 +796,7 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageTail(page), page);
array[i++] = page;
rac->_batch_count += hpage_nr_pages(page);
rac->_batch_count += thp_nr_pages(page);

/*
 * The page cache isn't using multi-index entries yet,

@ -47,7 +47,6 @@ struct stat64;
struct statfs;
struct statfs64;
struct statx;
struct __sysctl_args;
struct sysinfo;
struct timespec;
struct __kernel_old_timeval;
@ -1117,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned);
asmlinkage long sys_bdflush(int func, long data);
asmlinkage long sys_oldumount(char __user *name);
asmlinkage long sys_uselib(const char __user *library);
asmlinkage long sys_sysctl(struct __sysctl_args __user *args);
asmlinkage long sys_sysfs(int option,
unsigned long arg1, unsigned long arg2);
asmlinkage long sys_fork(void);
@ -74,15 +74,13 @@ int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
 * sysctl names can be mirrored automatically under /proc/sys. The
 * procname supplied controls /proc naming.
 *
 * The table's mode will be honoured both for sys_sysctl(2) and
 * proc-fs access.
 * The table's mode will be honoured for proc-fs access.
 *
 * Leaf nodes in the sysctl tree will be represented by a single file
 * under /proc; non-leaf nodes will be represented by directories. A
 * null procname disables /proc mirroring at this node.
 *
 * sysctl(2) can automatically manage read and write requests through
 * the sysctl table. The data and maxlen fields of the ctl_table
 * The data and maxlen fields of the ctl_table
 * struct enable minimal validation of the values being written to be
 * performed, and the mode field allows minimal authentication.
 *

@ -5,7 +5,7 @@

obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
sysctl.o capability.o ptrace.o user.o \
signal.o sys.o umh.o workqueue.o pid.o task_work.o \
extable.o params.o \
kthread.o sys_ni.o nsproxy.o \

@ -364,7 +364,6 @@ COND_SYSCALL(socketcall);
COND_SYSCALL_COMPAT(socketcall);

/* compat syscalls for arm64, x86, ... */
COND_SYSCALL_COMPAT(sysctl);
COND_SYSCALL_COMPAT(fanotify_mark);

/* x86 */
@ -1,171 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/stat.h>
#include <linux/sysctl.h>
#include "../fs/xfs/xfs_sysctl.h"
#include <linux/sunrpc/debug.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/file.h>
#include <linux/ctype.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/compat.h>

static ssize_t binary_sysctl(const int *name, int nlen,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
return -ENOSYS;
}

static void deprecated_sysctl_warning(const int *name, int nlen)
{
int i;

/*
 * CTL_KERN/KERN_VERSION is used by older glibc and cannot
 * ever go away.
 */
if (nlen >= 2 && name[0] == CTL_KERN && name[1] == KERN_VERSION)
return;

if (printk_ratelimit()) {
printk(KERN_INFO
"warning: process `%s' used the deprecated sysctl "
"system call with ", current->comm);
for (i = 0; i < nlen; i++)
printk(KERN_CONT "%d.", name[i]);
printk(KERN_CONT "\n");
}
return;
}

#define WARN_ONCE_HASH_BITS 8
#define WARN_ONCE_HASH_SIZE (1<<WARN_ONCE_HASH_BITS)

static DECLARE_BITMAP(warn_once_bitmap, WARN_ONCE_HASH_SIZE);

#define FNV32_OFFSET 2166136261U
#define FNV32_PRIME 0x01000193

/*
 * Print each legacy sysctl (approximately) only once.
 * To avoid making the tables non-const use a external
 * hash-table instead.
 * Worst case hash collision: 6, but very rarely.
 * NOTE! We don't use the SMP-safe bit tests. We simply
 * don't care enough.
 */
static void warn_on_bintable(const int *name, int nlen)
{
int i;
u32 hash = FNV32_OFFSET;

for (i = 0; i < nlen; i++)
hash = (hash ^ name[i]) * FNV32_PRIME;
hash %= WARN_ONCE_HASH_SIZE;
if (__test_and_set_bit(hash, warn_once_bitmap))
return;
deprecated_sysctl_warning(name, nlen);
}

static ssize_t do_sysctl(int __user *args_name, int nlen,
void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
{
int name[CTL_MAXNAME];
int i;

/* Check args->nlen. */
if (nlen < 0 || nlen > CTL_MAXNAME)
return -ENOTDIR;
/* Read in the sysctl name for simplicity */
for (i = 0; i < nlen; i++)
if (get_user(name[i], args_name + i))
return -EFAULT;

warn_on_bintable(name, nlen);

return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
}

SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
{
struct __sysctl_args tmp;
size_t oldlen = 0;
ssize_t result;

if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;

if (tmp.oldval && !tmp.oldlenp)
return -EFAULT;

if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp))
return -EFAULT;

result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen,
tmp.newval, tmp.newlen);

if (result >= 0) {
oldlen = result;
result = 0;
}

if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp))
return -EFAULT;

return result;
}


#ifdef CONFIG_COMPAT

struct compat_sysctl_args {
compat_uptr_t name;
int nlen;
compat_uptr_t oldval;
compat_uptr_t oldlenp;
compat_uptr_t newval;
compat_size_t newlen;
compat_ulong_t __unused[4];
};

COMPAT_SYSCALL_DEFINE1(sysctl, struct compat_sysctl_args __user *, args)
{
struct compat_sysctl_args tmp;
compat_size_t __user *compat_oldlenp;
size_t oldlen = 0;
ssize_t result;

if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;

if (tmp.oldval && !tmp.oldlenp)
return -EFAULT;

compat_oldlenp = compat_ptr(tmp.oldlenp);
if (compat_oldlenp && get_user(oldlen, compat_oldlenp))
return -EFAULT;

result = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
compat_ptr(tmp.oldval), oldlen,
compat_ptr(tmp.newval), tmp.newlen);

if (result >= 0) {
oldlen = result;
result = 0;
}

if (compat_oldlenp && put_user(oldlen, compat_oldlenp))
return -EFAULT;

return result;
}

#endif /* CONFIG_COMPAT */
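With this file deleted and sys_sysctl() gone, the binary interface no longer exists even as an -ENOSYS stub; the /proc/sys tree is the only sysctl interface. As an illustration (not part of the patch), an old CTL_KERN/KERN_OSTYPE lookup becomes a plain read of /proc/sys/kernel/ostype:

/* Illustration: userspace replacement for a binary sysctl() lookup. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
char buf[64];
int fd = open("/proc/sys/kernel/ostype", O_RDONLY);
ssize_t n = fd >= 0 ? read(fd, buf, sizeof(buf) - 1) : -1;
if (n > 0) {
buf[n] = '\0';
printf("%s", buf); /* e.g. "Linux" */
}
if (fd >= 0)
close(fd);
return 0;
}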
30
lib/iomap.c
@ -70,27 +70,27 @@ static void bad_io_access(unsigned long port, const char *access)
#define mmio_read64be(addr) swab64(readq(addr))
#endif

unsigned int ioread8(void __iomem *addr)
unsigned int ioread8(const void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
return 0xff;
}
unsigned int ioread16(void __iomem *addr)
unsigned int ioread16(const void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
return 0xffff;
}
unsigned int ioread16be(void __iomem *addr)
unsigned int ioread16be(const void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
return 0xffff;
}
unsigned int ioread32(void __iomem *addr)
unsigned int ioread32(const void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
return 0xffffffff;
}
unsigned int ioread32be(void __iomem *addr)
unsigned int ioread32be(const void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
return 0xffffffff;
@ -142,26 +142,26 @@ static u64 pio_read64be_hi_lo(unsigned long port)
return lo | (hi << 32);
}

u64 ioread64_lo_hi(void __iomem *addr)
u64 ioread64_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
return 0xffffffffffffffffULL;
}

u64 ioread64_hi_lo(void __iomem *addr)
u64 ioread64_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
return 0xffffffffffffffffULL;
}

u64 ioread64be_lo_hi(void __iomem *addr)
u64 ioread64be_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_lo_hi(port),
return mmio_read64be(addr));
return 0xffffffffffffffffULL;
}

u64 ioread64be_hi_lo(void __iomem *addr)
u64 ioread64be_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_hi_lo(port),
return mmio_read64be(addr));
@ -275,7 +275,7 @@ EXPORT_SYMBOL(iowrite64be_hi_lo);
 * order" (we also don't have IO barriers).
 */
#ifndef mmio_insb
static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count)
{
while (--count >= 0) {
u8 data = __raw_readb(addr);
@ -283,7 +283,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
dst++;
}
}
static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count)
{
while (--count >= 0) {
u16 data = __raw_readw(addr);
@ -291,7 +291,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
dst++;
}
}
static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count)
{
while (--count >= 0) {
u32 data = __raw_readl(addr);
@ -325,15 +325,15 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
}
#endif

void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
}
void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
}
void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
}
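These lib/iomap.c helpers dispatch on the opaque cookie: a value in the low port range is handled with port I/O (inb()/inw()/inl()), anything else is treated as an MMIO address (readb()/readw()/readl()); constifying the argument only documents that a read does not modify the mapping. A hedged caller sketch, where the BAR number and the REG_STATUS/REG_CTRL offsets are made up for the example:

/* Sketch of a typical user; register offsets are illustrative only. */
void __iomem *regs = pci_iomap(pdev, 0, 0); /* map BAR 0, port or MMIO */
if (!regs)
return -ENOMEM;
u32 status = ioread32(regs + REG_STATUS); /* works for both I/O-port and MMIO BARs */
iowrite32(status | 0x1, regs + REG_CTRL);
pci_iounmap(pdev, regs);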
@ -446,7 +446,7 @@ _last_literals:
*op++ = (BYTE)(lastRun << ML_BITS);
}

memcpy(op, anchor, lastRun);
LZ4_memcpy(op, anchor, lastRun);

op += lastRun;
}
@ -708,7 +708,7 @@ _last_literals:
} else {
*op++ = (BYTE)(lastRunSize<<ML_BITS);
}
memcpy(op, anchor, lastRunSize);
LZ4_memcpy(op, anchor, lastRunSize);
op += lastRunSize;
}

@ -153,7 +153,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
&& likely((endOnInput ? ip < shortiend : 1) &
(op <= shortoend))) {
/* Copy the literals */
memcpy(op, ip, endOnInput ? 16 : 8);
LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
op += length; ip += length;

/*
@ -172,9 +172,9 @@ static FORCE_INLINE int LZ4_decompress_generic(
(offset >= 8) &&
(dict == withPrefix64k || match >= lowPrefix)) {
/* Copy the match. */
memcpy(op + 0, match + 0, 8);
memcpy(op + 8, match + 8, 8);
memcpy(op + 16, match + 16, 2);
LZ4_memcpy(op + 0, match + 0, 8);
LZ4_memcpy(op + 8, match + 8, 8);
LZ4_memcpy(op + 16, match + 16, 2);
op += length + MINMATCH;
/* Both stages worked, load the next token. */
continue;
@ -263,7 +263,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
}
}

memcpy(op, ip, length);
LZ4_memcpy(op, ip, length);
ip += length;
op += length;

@ -350,7 +350,7 @@ _copy_match:
size_t const copySize = (size_t)(lowPrefix - match);
size_t const restSize = length - copySize;

memcpy(op, dictEnd - copySize, copySize);
LZ4_memcpy(op, dictEnd - copySize, copySize);
op += copySize;
if (restSize > (size_t)(op - lowPrefix)) {
/* overlap copy */
@ -360,7 +360,7 @@ _copy_match:
while (op < endOfMatch)
*op++ = *copyFrom++;
} else {
memcpy(op, lowPrefix, restSize);
LZ4_memcpy(op, lowPrefix, restSize);
op += restSize;
}
}
@ -386,7 +386,7 @@ _copy_match:
while (op < copyEnd)
*op++ = *match++;
} else {
memcpy(op, match, mlen);
LZ4_memcpy(op, match, mlen);
}
op = copyEnd;
if (op == oend)
@ -400,7 +400,7 @@ _copy_match:
op[2] = match[2];
op[3] = match[3];
match += inc32table[offset];
memcpy(op + 4, match, 4);
LZ4_memcpy(op + 4, match, 4);
match -= dec64table[offset];
} else {
LZ4_copy8(op, match);

@ -137,6 +137,16 @@ static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
return put_unaligned_le16(value, memPtr);
}

/*
 * LZ4 relies on memcpy with a constant size being inlined. In freestanding
 * environments, the compiler can't assume the implementation of memcpy() is
 * standard compliant, so apply its specialized memcpy() inlining logic. When
 * possible, use __builtin_memcpy() to tell the compiler to analyze memcpy()
 * as-if it were standard compliant, so it can inline it in freestanding
 * environments. This is needed when decompressing the Linux Kernel, for example.
 */
#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
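The point of routing every fixed-size copy above through LZ4_memcpy() is that __builtin_memcpy() with a compile-time-constant length can be expanded inline even in freestanding builds, where a plain memcpy() call may have to go through the out-of-line implementation. A hedged illustration of why the constant length matters (how compilers typically lower it, not a guarantee):

/* Illustration: constant-size copies the compiler is free to open-code. */
LZ4_memcpy(op, ip, 8); /* typically one 8-byte load/store pair */
LZ4_memcpy(op, ip, 16); /* typically two 8-byte moves (or one 16-byte move) */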

static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
#if LZ4_ARCH64

@ -570,7 +570,7 @@ _Search3:
*op++ = (BYTE) lastRun;
} else
*op++ = (BYTE)(lastRun<<ML_BITS);
memcpy(op, anchor, iend - anchor);
LZ4_memcpy(op, anchor, iend - anchor);
op += iend - anchor;
}

@ -1009,7 +1009,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
del_page_from_lru_list(page, lruvec, page_lru(page));
mod_node_page_state(page_pgdat(page),
NR_ISOLATED_ANON + page_is_file_lru(page),
hpage_nr_pages(page));
thp_nr_pages(page));

isolate_success:
list_add(&page->lru, &cc->migratepages);

22
mm/filemap.c
@ -198,7 +198,7 @@ static void unaccount_page_cache_page(struct address_space *mapping,
if (PageHuge(page))
return;

nr = hpage_nr_pages(page);
nr = thp_nr_pages(page);

__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
if (PageSwapBacked(page)) {
@ -2468,6 +2468,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
struct address_space *mapping = file->f_mapping;
struct file *fpin = NULL;
pgoff_t offset = vmf->pgoff;
unsigned int mmap_miss;

/* If we don't want any read-ahead, don't bother */
if (vmf->vma->vm_flags & VM_RAND_READ)
@ -2483,14 +2484,15 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
}

/* Avoid banging the cache line if not needed */
if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
ra->mmap_miss++;
mmap_miss = READ_ONCE(ra->mmap_miss);
if (mmap_miss < MMAP_LOTSAMISS * 10)
WRITE_ONCE(ra->mmap_miss, ++mmap_miss);

/*
 * Do we miss much more than hit in this file? If so,
 * stop bothering with read-ahead. It will only hurt.
 */
if (ra->mmap_miss > MMAP_LOTSAMISS)
if (mmap_miss > MMAP_LOTSAMISS)
return fpin;

/*
@ -2516,13 +2518,15 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
struct file_ra_state *ra = &file->f_ra;
struct address_space *mapping = file->f_mapping;
struct file *fpin = NULL;
unsigned int mmap_miss;
pgoff_t offset = vmf->pgoff;

/* If we don't want any read-ahead, don't bother */
if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
return fpin;
if (ra->mmap_miss > 0)
ra->mmap_miss--;
mmap_miss = READ_ONCE(ra->mmap_miss);
if (mmap_miss)
WRITE_ONCE(ra->mmap_miss, --mmap_miss);
if (PageReadahead(page)) {
fpin = maybe_unlock_mmap_for_io(vmf, fpin);
page_cache_async_readahead(mapping, ra, file,
@ -2688,6 +2692,7 @@ void filemap_map_pages(struct vm_fault *vmf,
unsigned long max_idx;
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct page *page;
unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);

rcu_read_lock();
xas_for_each(&xas, page, end_pgoff) {
@ -2724,8 +2729,8 @@ void filemap_map_pages(struct vm_fault *vmf,
if (page->index >= max_idx)
goto unlock;

if (file->f_ra.mmap_miss > 0)
file->f_ra.mmap_miss--;
if (mmap_miss > 0)
mmap_miss--;

vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
if (vmf->pte)
@ -2745,6 +2750,7 @@ next:
break;
}
rcu_read_unlock();
WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
}
EXPORT_SYMBOL(filemap_map_pages);
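The mmap_miss changes above all follow the same snapshot-and-publish pattern for a deliberately racy statistic: read it once with READ_ONCE(), work on the local copy, and write it back with WRITE_ONCE(), so concurrent faults may lose an update but can never observe a torn or compiler-mangled value. A minimal sketch of the pattern, with generic names (LIMIT is a placeholder):

/* Sketch: tolerate lost updates, but never torn reads or writes. */
unsigned int miss = READ_ONCE(ra->mmap_miss); /* one racy snapshot */
if (miss < LIMIT)
miss++; /* work on the local copy */
WRITE_ONCE(ra->mmap_miss, miss); /* single racy publish */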

@ -61,16 +61,16 @@ static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
frontswap_loads++;
data_race(frontswap_loads++);
}
static inline void inc_frontswap_succ_stores(void) {
frontswap_succ_stores++;
data_race(frontswap_succ_stores++);
}
static inline void inc_frontswap_failed_stores(void) {
frontswap_failed_stores++;
data_race(frontswap_failed_stores++);
}
static inline void inc_frontswap_invalidates(void) {
frontswap_invalidates++;
data_race(frontswap_invalidates++);
}
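data_race() marks a plain access whose raciness is intentional: these counters are best-effort statistics, so a lost increment is acceptable and KCSAN should not report it. A sketch of the same idiom on a hypothetical counter (not from the patch):

/* Hypothetical counter: racy by design, no ordering or atomicity implied. */
static u64 my_stat_count;

static inline void inc_my_stat(void)
{
data_race(my_stat_count++); /* tells KCSAN the race is intentional */
}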
#else
static inline void inc_frontswap_loads(void) { }

2
mm/gup.c
@ -1637,7 +1637,7 @@ check_again:
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_lru(head),
hpage_nr_pages(head));
thp_nr_pages(head));
}
}
}

@ -369,7 +369,7 @@ extern void clear_page_mlock(struct page *page);
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
if (TestClearPageMlocked(page)) {
int nr_pages = hpage_nr_pages(page);
int nr_pages = thp_nr_pages(page);

/* Holding pmd lock, no change in irq context: __mod is safe */
__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
@ -396,7 +396,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
unsigned long start, end;

start = __vma_address(page, vma);
end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
end = start + thp_size(page) - PAGE_SIZE;

/* page should be within @vma mapping range */
VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
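The two forms of the 'end' computation above are equivalent, since thp_size(page) is thp_nr_pages(page) * PAGE_SIZE; written out as a short derivation (illustration only):

/* PAGE_SIZE * (thp_nr_pages(page) - 1)
 *   = thp_nr_pages(page) * PAGE_SIZE - PAGE_SIZE
 *   = thp_size(page) - PAGE_SIZE
 */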

@ -1169,8 +1169,10 @@ static bool update_checksum(struct kmemleak_object *object)
u32 old_csum = object->checksum;

kasan_disable_current();
kcsan_disable_current();
object->checksum = crc32(0, (void *)object->pointer, object->size);
kasan_enable_current();
kcsan_enable_current();

return object->checksum != old_csum;
}
Some files were not shown because too many files have changed in this diff.