s390 updates for 6.9-rc3

Merge tag 's390-6.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Alexander Gordeev:

 - Fix missing NULL pointer check when determining guest/host fault

 - Mark all functions in asm/atomic_ops.h, asm/atomic.h and
   asm/preempt.h as __always_inline to avoid unwanted instrumentation

 - Fix removal of a Processor Activity Instrumentation (PAI) sampling
   event in the PMU device driver

 - Align the system call table on 8 bytes

* tag 's390-6.9-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/entry: align system call table on 8 bytes
  s390/pai: fix sampling event removal for PMU device driver
  s390/preempt: mark all functions __always_inline
  s390/atomic: mark all functions __always_inline
  s390/mm: fix NULL pointer dereference
commit 50094473ec
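For context on the inlining changes below: plain "inline" is only a hint, and the compiler is free to emit a standalone function instead, which can then pick up instrumentation (tracing, sanitizers) that these low-level helpers must not have. __always_inline removes that freedom. A minimal userspace sketch of the distinction, using the GCC attribute behind the kernel's macro (illustrative names, not from the tree):

#include <stdio.h>

/* Plain "inline" is a hint; under -O0 or instrumentation-heavy configs
 * the compiler may still emit (and instrument) an out-of-line copy. */
static inline int hint_inline(int x)
{
	return x + 1;
}

/* always_inline forces the body into every caller. */
static inline __attribute__((always_inline)) int forced_inline(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d %d\n", hint_inline(41), forced_inline(41));
	return 0;
}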
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h

@@ -15,31 +15,31 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
 	return __atomic_read(v);
 }
 #define arch_atomic_read arch_atomic_read
 
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
 	__atomic_set(v, i);
 }
 #define arch_atomic_set arch_atomic_set
 
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
 #define arch_atomic_add_return arch_atomic_add_return
 
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op)							\
-static inline void arch_atomic_##op(int i, atomic_t *v)		\
+static __always_inline void arch_atomic_##op(int i, atomic_t *v)	\
 {									\
 	__atomic_##op(i, &v->counter);					\
 }									\
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
 {									\
 	return __atomic_##op##_barrier(i, &v->counter);			\
 }
@@ -74,7 +74,7 @@ ATOMIC_OPS(xor)
 
 #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
@@ -82,31 +82,31 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	return __atomic64_read(v);
 }
 #define arch_atomic64_read arch_atomic64_read
 
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__atomic64_set(v, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
@@ -114,20 +114,20 @@ static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 
 #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define ATOMIC64_OPS(op)						\
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
-{									\
-	__atomic64_##op(i, (long *)&v->counter);			\
-}									\
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
-{									\
-	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
+#define ATOMIC64_OPS(op)						\
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v)	\
+{									\
+	__atomic64_##op(i, (long *)&v->counter);			\
+}									\
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{									\
+	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
 
 ATOMIC64_OPS(and)
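One detail worth noting in the hunks above: arch_atomic_add_return() derives the new value by re-adding i to what the barriered fetch-and-add returns, since that primitive yields the value before the addition. A userspace analogue with GCC builtins (a sketch, not the s390 implementation):

/* Fetch-and-add returns the value *before* the addition ... */
static int fetch_add(int *counter, int i)
{
	return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
}

/* ... so the new value is simply old + i, as in arch_atomic_add_return(). */
static int add_return(int *counter, int i)
{
	return fetch_add(counter, i) + i;
}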
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h

@@ -8,7 +8,7 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
 {
 	int c;
 
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
 	return c;
 }
 
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
 {
 	asm volatile(
 		"	st	%1,%0\n"
 		: "=R" (v->counter) : "d" (i));
 }
 
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
 {
 	s64 c;
 
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
 	return c;
 }
 
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
 {
 	asm volatile(
 		"	stg	%1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier)		\
-static inline op_type op_name(op_type val, op_type *ptr)		\
+static __always_inline op_type op_name(op_type val, op_type *ptr)	\
 {									\
 	op_type old;							\
 									\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define __ATOMIC_OP(op_name, op_string)					\
-static inline int op_name(int val, int *ptr)				\
+static __always_inline int op_name(int val, int *ptr)			\
 {									\
 	int old, new;							\
 									\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
 #undef __ATOMIC_OPS
 
 #define __ATOMIC64_OP(op_name, op_string)				\
-static inline long op_name(long val, long *ptr)			\
+static __always_inline long op_name(long val, long *ptr)		\
 {									\
 	long old, new;							\
 									\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
 	asm volatile(
 		"	cs	%[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
 	return old;
 }
 
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
 	int old_expected = old;
 
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 	return old == old_expected;
 }
 
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
 	asm volatile(
 		"	csg	%[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 	return old;
 }
 
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
 	long old_expected = old;
 
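The cs and csg instructions in this header are compare-and-swap: the word at ptr is replaced with new only if it still contains old, and the previously observed value is returned. A hedged userspace equivalent of __atomic_cmpxchg() built on the GCC builtin (a sketch, not the kernel code):

#include <stdbool.h>

static int cmpxchg_int(int *ptr, int old, int new)
{
	/* On failure the builtin writes the observed value back into
	 * "old", matching what cs leaves in its first operand. */
	__atomic_compare_exchange_n(ptr, &old, new, false,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;
}

/* Typical retry loop: compare the return value with the expected
 * "old" to learn whether the swap happened. */
static bool add_if_positive(int *ptr, int i)
{
	int old, new;

	do {
		old = *ptr;
		if (old <= 0)
			return false;
		new = old + i;
	} while (cmpxchg_int(ptr, old, new) != old);
	return true;
}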
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h

@@ -12,12 +12,12 @@
 #define PREEMPT_NEED_RESCHED	0x80000000
 #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
 	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
 	int old, new;
 
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
 			      old, new) != old);
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
 	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
 	/*
 	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
 	__atomic_add(val, &S390_lowcore.preempt_count);
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
 	__preempt_count_add(-val);
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
 	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
 			preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
 
 #define PREEMPT_ENABLED	(0)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
 	return READ_ONCE(S390_lowcore.preempt_count);
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
 	S390_lowcore.preempt_count = pc;
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
 	return false;
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
 	S390_lowcore.preempt_count += val;
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
 	S390_lowcore.preempt_count -= val;
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	return !--S390_lowcore.preempt_count && tif_need_resched();
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
 	return unlikely(preempt_count() == preempt_offset &&
 			tif_need_resched());
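The PREEMPT_NEED_RESCHED handling above is easy to misread: s390 folds the need-resched flag into the preempt count as an inverted top bit (the bit is set while no reschedule is needed), so should_resched() collapses "count is zero and resched requested" into a single compare. A standalone sketch of just that encoding (illustrative, not the kernel header):

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* Inverted flag: the bit is SET while no reschedule is needed. */
static unsigned int preempt_count_word = PREEMPT_NEED_RESCHED;

static unsigned int preempt_count(void)
{
	return preempt_count_word & ~PREEMPT_NEED_RESCHED;
}

static void set_preempt_need_resched(void)
{
	preempt_count_word &= ~PREEMPT_NEED_RESCHED;
}

static bool should_resched(void)
{
	/* "count == 0 AND resched requested" is one compare against 0. */
	return preempt_count_word == 0;
}

int main(void)
{
	printf("count=%u should_resched=%d\n", preempt_count(), should_resched());
	set_preempt_need_resched();
	printf("count=%u should_resched=%d\n", preempt_count(), should_resched());
	return 0;
}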
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S

@@ -635,6 +635,7 @@ SYM_DATA_START_LOCAL(daton_psw)
 SYM_DATA_END(daton_psw)
 
 	.section .rodata, "a"
+	.balign 8
 #define SYSCALL(esame,emu)	.quad __s390x_ ## esame
 SYM_DATA_START(sys_call_table)
 #include "asm/syscall_table.h"
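This works because sys_call_table is an array of .quad (8-byte) handler addresses, but a bare .section switch inherits whatever alignment the previous .rodata contents left behind; the explicit .balign 8 guarantees the table never starts misaligned. A toy gas fragment showing the directive's effect (illustrative, not the kernel file):

	.section .rodata, "a"
	.byte	1		/* leaves the location counter misaligned */
	.balign	8		/* zero-pads so "table" is 8-byte aligned */
table:
	.quad	0x0123456789abcdef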
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c

@@ -90,7 +90,6 @@ static void paicrypt_event_destroy(struct perf_event *event)
 						 event->cpu);
 	struct paicrypt_map *cpump = mp->mapptr;
 
-	cpump->event = NULL;
 	static_branch_dec(&pai_key);
 	mutex_lock(&pai_reserve_mutex);
 	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
@@ -356,10 +355,15 @@ static int paicrypt_add(struct perf_event *event, int flags)
 
 static void paicrypt_stop(struct perf_event *event, int flags)
 {
-	if (!event->attr.sample_period)	/* Counting */
+	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
+	struct paicrypt_map *cpump = mp->mapptr;
+
+	if (!event->attr.sample_period) {	/* Counting */
 		paicrypt_read(event);
-	else	/* Sampling */
+	} else {	/* Sampling */
 		perf_sched_cb_dec(event->pmu);
+		cpump->event = NULL;
+	}
 	event->hw.state = PERF_HES_STOPPED;
 }
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c

@@ -122,7 +122,6 @@ static void paiext_event_destroy(struct perf_event *event)
 
 	free_page(PAI_SAVE_AREA(event));
 	mutex_lock(&paiext_reserve_mutex);
-	cpump->event = NULL;
 	if (refcount_dec_and_test(&cpump->refcnt))	/* Last reference gone */
 		paiext_free(mp);
 	paiext_root_free();
@@ -362,10 +361,15 @@ static int paiext_add(struct perf_event *event, int flags)
 
 static void paiext_stop(struct perf_event *event, int flags)
 {
-	if (!event->attr.sample_period)	/* Counting */
+	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
+	struct paiext_map *cpump = mp->mapptr;
+
+	if (!event->attr.sample_period) {	/* Counting */
 		paiext_read(event);
-	else	/* Sampling */
+	} else {	/* Sampling */
 		perf_sched_cb_dec(event->pmu);
+		cpump->event = NULL;
+	}
 	event->hw.state = PERF_HES_STOPPED;
 }
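Both PAI hunks apply the same fix: the per-CPU map's event back-pointer used by the sampling path was previously cleared only in the event-destroy callback, which runs well after the PMU's ->stop(); clearing it in ->stop(), and only on the sampling branch, means the context-switch callback can never chase a pointer to an event that has already been stopped. A comment-only sketch of the intended lifecycle (my reading of the diff, not driver documentation):

/*
 * Sampling-event lifecycle after the fix:
 *
 *   pmu->add()     install cpump->event, perf_sched_cb_inc()
 *   pmu->stop()    perf_sched_cb_dec(), cpump->event = NULL   <-- moved here
 *   pmu->del()
 *   ->destroy()    no longer touches cpump->event
 */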
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c

@@ -75,7 +75,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
 		if (!IS_ENABLED(CONFIG_PGSTE))
 			return KERNEL_FAULT;
 		gmap = (struct gmap *)S390_lowcore.gmap;
-		if (regs->cr1 == gmap->asce)
+		if (gmap && gmap->asce == regs->cr1)
 			return GMAP_FAULT;
 		return KERNEL_FAULT;
 	}
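The fault.c fix relies on && short-circuiting: the gmap pointer fetched from lowcore can legitimately be NULL when no guest address space is attached, so it must be tested before gmap->asce is read. A reduced illustration (not the kernel function):

#include <stdbool.h>

struct gmap {
	unsigned long asce;
};

static bool is_gmap_fault(struct gmap *gmap, unsigned long cr1)
{
	/* Pointer check first; gmap->asce is only read when gmap != NULL. */
	return gmap && gmap->asce == cr1;
}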