commit d60a540ac5

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:
 "Since Martin is on vacation you get the s390 pull request for the
  v4.15 merge window this time from me.

  Besides a lot of cleanups and bug fixes these are the most important
  changes:

   - a new regset for runtime instrumentation registers

   - hardware accelerated AES-GCM support for the aes_s390 module

   - support for the new CEX6S crypto cards

   - support for FORTIFY_SOURCE

   - addition of missing z13 and new z14 instructions to the in-kernel
     disassembler

   - generate opcode tables for the in-kernel disassembler out of a
     simple text file instead of having to manually maintain those
     tables

   - fast memset16, memset32 and memset64 implementations

   - removal of named saved segment support

   - hardware counter support for z14

   - queued spinlocks and queued rwlocks implementations for s390

   - use the stack_depth tracking feature for s390 BPF JIT

   - a new s390_sthyi system call which emulates the sthyi (store
     hypervisor information) instruction

   - removal of the old KVM virtio transport

   - an s390 specific CPU alternatives implementation which is used in
     the new spinlock code"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (88 commits)
  MAINTAINERS: add virtio-ccw.h to virtio/s390 section
  s390/noexec: execute kexec datamover without DAT
  s390: fix transactional execution control register handling
  s390/bpf: take advantage of stack_depth tracking
  s390: simplify transactional execution elf hwcap handling
  s390/zcrypt: Rework struct ap_qact_ap_info.
  s390/virtio: remove unused header file kvm_virtio.h
  s390: avoid undefined behaviour
  s390/disassembler: generate opcode tables from text file
  s390/disassembler: remove insn_to_mnemonic()
  s390/dasd: avoid calling do_gettimeofday()
  s390: vfio-ccw: Do not attempt to free no-op, test and tic cda.
  s390: remove named saved segment support
  s390/archrandom: Reconsider s390 arch random implementation
  s390/pci: do not require AIS facility
  s390/qdio: sanitize put_indicator
  s390/qdio: use atomic_cmpxchg
  s390/nmi: avoid using long-displacement facility
  s390: pass endianness info to sparse
  s390/decompressor: remove informational messages
  ...
@@ -2548,6 +2548,9 @@
 	noalign		[KNL,ARM]
 
+	noaltinstr	[S390] Disables alternative instructions patching
+			(CPU alternatives feature).
+
 	noapic		[SMP,APIC] Tells the kernel to not make use of any
 			IOAPICs that may be present in the system.
@@ -14335,6 +14335,7 @@ L:	virtualization@lists.linux-foundation.org
 L:	kvm@vger.kernel.org
 S:	Supported
 F:	drivers/s390/virtio/
+F:	arch/s390/include/uapi/asm/virtio-ccw.h
 
 VIRTIO GPU DRIVER
 M:	David Airlie <airlied@linux.ie>
@@ -68,6 +68,7 @@ config S390
 	select ARCH_BINFMT_ELF_STATE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
+	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
 	select ARCH_HAS_KCOV
@@ -143,7 +144,6 @@ config S390
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
@@ -538,6 +538,22 @@ config ARCH_RANDOM
 
 	  If unsure, say Y.
 
+config ALTERNATIVES
+	def_bool y
+	prompt "Patch optimized instructions for running CPU type"
+	help
+	  When enabled the kernel code is compiled with additional
+	  alternative instructions blocks optimized for newer CPU types.
+	  These alternative instructions blocks are patched at kernel boot
+	  time when running CPU supports them. This mechanism is used to
+	  optimize some critical code paths (i.e. spinlocks) for newer CPUs
+	  even if kernel is build to support older machine generations.
+
+	  This mechanism could be disabled by appending "noaltinstr"
+	  option to the kernel command line.
+
+	  If unsure, say Y.
+
 endmenu
 
 menu "Memory setup"
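[Sketch, not part of the diff: the "noaltinstr" switch in the help text above is the same parameter documented in the kernel-parameters.txt hunk at the top. A quick way to A/B-test the patching under qemu; the command and image paths are illustrative, not from this series.]

    qemu-system-s390x -nographic -kernel arch/s390/boot/bzImage \
        -append "root=/dev/vda1 noaltinstr" \
        -drive file=disk.qcow2,if=virtio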
@@ -809,18 +825,6 @@ config PFAULT
 	  Everybody who wants to run Linux under VM != VM4.2 should select
 	  this option.
 
-config SHARED_KERNEL
-	bool "VM shared kernel support"
-	depends on !JUMP_LABEL
-	help
-	  Select this option, if you want to share the text segment of the
-	  Linux kernel between different VM guests. This reduces memory
-	  usage with lots of guests but greatly increases kernel size.
-	  Also if a kernel was IPL'ed from a shared segment the kexec system
-	  call will not work.
-	  You should only select this option if you know what you are
-	  doing and want to exploit this feature.
-
 config CMM
 	def_tristate n
 	prompt "Cooperative memory management"
@@ -930,17 +934,4 @@ config S390_GUEST
 	  Select this option if you want to run the kernel as a guest under
 	  the KVM hypervisor.
 
-config S390_GUEST_OLD_TRANSPORT
-	def_bool y
-	prompt "Guest support for old s390 virtio transport (DEPRECATED)"
-	depends on S390_GUEST
-	help
-	  Enable this option to add support for the old s390-virtio
-	  transport (i.e. virtio devices NOT based on virtio-ccw). This
-	  type of virtio devices is only available on the experimental
-	  kuli userspace or with old (< 2.6) qemu. If you are running
-	  with a modern version of qemu (which supports virtio-ccw since
-	  1.4 and uses it by default since version 2.4), you probably won't
-	  need this.
-
 endmenu
@@ -21,7 +21,7 @@ KBUILD_CFLAGS += -m64
 KBUILD_AFLAGS	+= -m64
 UTS_MACHINE	:= s390x
 STACK_SIZE	:= 16384
-CHECKFLAGS	+= -D__s390__ -D__s390x__
+CHECKFLAGS	+= -D__s390__ -D__s390x__ -mbig-endian
 
 export LD_BFD
 
@@ -133,6 +133,7 @@ archclean:
 
 archprepare:
 	$(Q)$(MAKE) $(build)=$(tools) include/generated/facilities.h
+	$(Q)$(MAKE) $(build)=$(tools) include/generated/dis.h
 
 # Don't use tabs in echo arguments
 define archhelp
@@ -12,7 +12,7 @@ targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
 targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ -O2
-KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
 KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
@@ -170,9 +170,7 @@ unsigned long decompress_kernel(void)
 	free_mem_ptr = (unsigned long) &_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
-	puts("Uncompressing Linux... ");
 	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
-	puts("Ok, booting the kernel.\n");
 	return (unsigned long) output;
 }
@@ -69,7 +69,6 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
@@ -379,7 +378,6 @@ CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
 CONFIG_BLK_DEV_RAM_DAX=y
@@ -416,7 +414,6 @@ CONFIG_SCSI_OSD_ULD=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
@@ -483,6 +480,8 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -599,7 +598,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -629,10 +627,8 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_TEST_SORT=y
@@ -649,6 +645,7 @@ CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -705,12 +702,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -70,7 +70,6 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
@@ -376,7 +375,6 @@ CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
 CONFIG_BLK_DEV_RAM_DAX=y
@@ -412,7 +410,6 @@ CONFIG_SCSI_OSD_ULD=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
@@ -479,6 +476,8 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -575,10 +574,8 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
@@ -650,12 +647,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
@@ -68,7 +68,6 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
@@ -374,7 +373,6 @@ CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
 CONFIG_BLK_DEV_RAM_DAX=y
@@ -410,7 +408,6 @@ CONFIG_SCSI_OSD_ULD=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
@@ -477,6 +474,8 @@ CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -573,10 +572,8 @@ CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
@@ -648,12 +645,12 @@ CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m
@@ -4,9 +4,11 @@
  * s390 implementation of the AES Cipher Algorithm.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2005, 2007
+ *   Copyright IBM Corp. 2005, 2017
  *   Author(s): Jan Glauber (jang@de.ibm.com)
  *		Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback
+ *		Patrick Steuer <patrick.steuer@de.ibm.com>
+ *		Harald Freudenberger <freude@de.ibm.com>
  *
  * Derived from "crypto/aes_generic.c"
  *
@@ -22,20 +24,25 @@
 
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/ghash.h>
+#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/fips.h>
+#include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
 static DEFINE_SPINLOCK(ctrblk_lock);
 
-static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
+		    kma_functions;
 
 struct s390_aes_ctx {
 	u8 key[AES_MAX_KEY_SIZE];
@@ -55,6 +62,17 @@ struct s390_xts_ctx {
 	struct crypto_skcipher *fallback;
 };
 
+struct gcm_sg_walk {
+	struct scatter_walk walk;
+	unsigned int walk_bytes;
+	u8 *walk_ptr;
+	unsigned int walk_bytes_remain;
+	u8 buf[AES_BLOCK_SIZE];
+	unsigned int buf_bytes;
+	u8 *ptr;
+	unsigned int nbytes;
+};
+
 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
 			       unsigned int key_len)
 {
@@ -771,6 +789,267 @@ static struct crypto_alg ctr_aes_alg = {
 	}
 };
 
+static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
+			  unsigned int keylen)
+{
+	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+		ctx->fc = CPACF_KMA_GCM_AES_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->fc = CPACF_KMA_GCM_AES_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->fc = CPACF_KMA_GCM_AES_256;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_len = keylen;
+	return 0;
+}
+
+static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	switch (authsize) {
+	case 4:
+	case 8:
+	case 12:
+	case 13:
+	case 14:
+	case 15:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
+			      unsigned int len)
+{
+	memset(gw, 0, sizeof(*gw));
+	gw->walk_bytes_remain = len;
+	scatterwalk_start(&gw->walk, sg);
+}
+
+static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
+{
+	int n;
+
+	/* minbytesneeded <= AES_BLOCK_SIZE */
+	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
+		gw->ptr = gw->buf;
+		gw->nbytes = gw->buf_bytes;
+		goto out;
+	}
+
+	if (gw->walk_bytes_remain == 0) {
+		gw->ptr = NULL;
+		gw->nbytes = 0;
+		goto out;
+	}
+
+	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
+	if (!gw->walk_bytes) {
+		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+						   gw->walk_bytes_remain);
+	}
+	gw->walk_ptr = scatterwalk_map(&gw->walk);
+
+	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
+		gw->ptr = gw->walk_ptr;
+		gw->nbytes = gw->walk_bytes;
+		goto out;
+	}
+
+	while (1) {
+		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
+		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
+		gw->buf_bytes += n;
+		gw->walk_bytes_remain -= n;
+		scatterwalk_unmap(&gw->walk);
+		scatterwalk_advance(&gw->walk, n);
+		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+
+		if (gw->buf_bytes >= minbytesneeded) {
+			gw->ptr = gw->buf;
+			gw->nbytes = gw->buf_bytes;
+			goto out;
+		}
+
+		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+						   gw->walk_bytes_remain);
+		if (!gw->walk_bytes) {
+			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
+			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
+							gw->walk_bytes_remain);
+		}
+		gw->walk_ptr = scatterwalk_map(&gw->walk);
+	}
+
+out:
+	return gw->nbytes;
+}
+
+static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
+{
+	int n;
+
+	if (gw->ptr == NULL)
+		return;
+
+	if (gw->ptr == gw->buf) {
+		n = gw->buf_bytes - bytesdone;
+		if (n > 0) {
+			memmove(gw->buf, gw->buf + bytesdone, n);
+			gw->buf_bytes -= n;
+		} else
+			gw->buf_bytes = 0;
+	} else {
+		gw->walk_bytes_remain -= bytesdone;
+		scatterwalk_unmap(&gw->walk);
+		scatterwalk_advance(&gw->walk, bytesdone);
+		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
+	}
+}
+
+static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
+	unsigned int ivsize = crypto_aead_ivsize(tfm);
+	unsigned int taglen = crypto_aead_authsize(tfm);
+	unsigned int aadlen = req->assoclen;
+	unsigned int pclen = req->cryptlen;
+	int ret = 0;
+
+	unsigned int len, in_bytes, out_bytes,
+		     min_bytes, bytes, aad_bytes, pc_bytes;
+	struct gcm_sg_walk gw_in, gw_out;
+	u8 tag[GHASH_DIGEST_SIZE];
+
+	struct {
+		u32 _[3];		/* reserved */
+		u32 cv;			/* Counter Value */
+		u8 t[GHASH_DIGEST_SIZE];/* Tag */
+		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
+		u64 taadl;		/* Total AAD Length */
+		u64 tpcl;		/* Total Plain-/Cipher-text Length */
+		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
+		u8 k[AES_MAX_KEY_SIZE];	/* Key */
+	} param;
+
+	/*
+	 * encrypt
+	 *   req->src: aad||plaintext
+	 *   req->dst: aad||ciphertext||tag
+	 * decrypt
+	 *   req->src: aad||ciphertext||tag
+	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
+	 * aad, plaintext and ciphertext may be empty.
+	 */
+	if (flags & CPACF_DECRYPT)
+		pclen -= taglen;
+	len = aadlen + pclen;
+
+	memset(&param, 0, sizeof(param));
+	param.cv = 1;
+	param.taadl = aadlen * 8;
+	param.tpcl = pclen * 8;
+	memcpy(param.j0, req->iv, ivsize);
+	*(u32 *)(param.j0 + ivsize) = 1;
+	memcpy(param.k, ctx->key, ctx->key_len);
+
+	gcm_sg_walk_start(&gw_in, req->src, len);
+	gcm_sg_walk_start(&gw_out, req->dst, len);
+
+	do {
+		min_bytes = min_t(unsigned int,
+				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
+		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
+		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
+		bytes = min(in_bytes, out_bytes);
+
+		if (aadlen + pclen <= bytes) {
+			aad_bytes = aadlen;
+			pc_bytes = pclen;
+			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
+		} else {
+			if (aadlen <= bytes) {
+				aad_bytes = aadlen;
+				pc_bytes = (bytes - aadlen) &
+					   ~(AES_BLOCK_SIZE - 1);
+				flags |= CPACF_KMA_LAAD;
+			} else {
+				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
+				pc_bytes = 0;
+			}
+		}
+
+		if (aad_bytes > 0)
+			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
+
+		cpacf_kma(ctx->fc | flags, &param,
+			  gw_out.ptr + aad_bytes,
+			  gw_in.ptr + aad_bytes, pc_bytes,
+			  gw_in.ptr, aad_bytes);
+
+		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
+		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
+		aadlen -= aad_bytes;
+		pclen -= pc_bytes;
+	} while (aadlen + pclen > 0);
+
+	if (flags & CPACF_DECRYPT) {
+		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
+		if (crypto_memneq(tag, param.t, taglen))
+			ret = -EBADMSG;
+	} else
+		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
+
+	memzero_explicit(&param, sizeof(param));
+	return ret;
+}
+
+static int gcm_aes_encrypt(struct aead_request *req)
+{
+	return gcm_aes_crypt(req, CPACF_ENCRYPT);
+}
+
+static int gcm_aes_decrypt(struct aead_request *req)
+{
+	return gcm_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct aead_alg gcm_aes_aead = {
+	.setkey			= gcm_aes_setkey,
+	.setauthsize		= gcm_aes_setauthsize,
+	.encrypt		= gcm_aes_encrypt,
+	.decrypt		= gcm_aes_decrypt,
+
+	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
+	.maxauthsize		= GHASH_DIGEST_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+
+	.base			= {
+		.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
+		.cra_priority		= 900,
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "gcm-aes-s390",
+		.cra_module		= THIS_MODULE,
+	},
+};
+
 static struct crypto_alg *aes_s390_algs_ptr[5];
 static int aes_s390_algs_num;
@@ -790,16 +1069,19 @@ static void aes_s390_fini(void)
 		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
 	if (ctrblk)
 		free_page((unsigned long) ctrblk);
+
+	crypto_unregister_aead(&gcm_aes_aead);
 }
 
 static int __init aes_s390_init(void)
 {
 	int ret;
 
-	/* Query available functions for KM, KMC and KMCTR */
+	/* Query available functions for KM, KMC, KMCTR and KMA */
 	cpacf_query(CPACF_KM, &km_functions);
 	cpacf_query(CPACF_KMC, &kmc_functions);
 	cpacf_query(CPACF_KMCTR, &kmctr_functions);
+	cpacf_query(CPACF_KMA, &kma_functions);
 
 	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
 	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
@@ -840,6 +1122,14 @@ static int __init aes_s390_init(void)
 			goto out_err;
 	}
 
+	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
+	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
+	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
+		ret = crypto_register_aead(&gcm_aes_aead);
+		if (ret)
+			goto out_err;
+	}
+
 	return 0;
 out_err:
 	aes_s390_fini();
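[Sketch, not part of the diff: once the driver above registers, the hardware-backed "gcm(aes)" is reached through the normal in-kernel AEAD API and selected automatically via its cra_priority. A minimal, hedged caller; all names here are ours, error paths trimmed, and a synchronous implementation is assumed (async ones return -EINPROGRESS and need a completion callback).]

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Hypothetical example: in-place encrypt of len bytes with AES-256-GCM.
     * buf must have room for len bytes plus the 16-byte tag. */
    static int example_gcm_encrypt(u8 *buf, unsigned int len,
                                   const u8 *key, u8 *iv /* 12 bytes */)
    {
            struct crypto_aead *tfm;
            struct aead_request *req;
            struct scatterlist sg;
            int ret;

            tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            ret = crypto_aead_setkey(tfm, key, 32);
            if (ret)
                    goto out;
            ret = crypto_aead_setauthsize(tfm, 16);
            if (ret)
                    goto out;
            req = aead_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out;
            }
            sg_init_one(&sg, buf, len + 16);
            aead_request_set_crypt(req, &sg, &sg, len, iv);
            aead_request_set_ad(req, 0);    /* no associated data */
            ret = crypto_aead_encrypt(req);
            aead_request_free(req);
    out:
            crypto_free_aead(tfm);
            return ret;
    }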
@@ -53,7 +53,6 @@ CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -163,7 +162,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
@@ -179,7 +177,6 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
@@ -15,6 +15,7 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += preempt.h
+generic-y += rwsem.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += word-at-a-time.h
arch/s390/include/asm/alternative.h (new file, 163 lines)
@@ -0,0 +1,163 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+	s32 instr_offset;	/* original instruction */
+	s32 repl_offset;	/* offset to replacement instruction */
+	u16 facility;		/* facility bit set for replacement */
+	u8  instrlen;		/* length of original instruction */
+	u8  replacementlen;	/* length of new instruction */
+} __packed;
+
+#ifdef CONFIG_ALTERNATIVES
+extern void apply_alternative_instructions(void);
+extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+#else
+static inline void apply_alternative_instructions(void) {};
+static inline void apply_alternatives(struct alt_instr *start,
+				      struct alt_instr *end) {};
+#endif
+/*
+ * |661:       |662:      |6620      |663:
+ * +-----------+---------------------+
+ * | oldinstr  |  oldinstr_padding   |
+ * |           +----------+----------+
+ * |           |          |          |
+ * |           | >6 bytes |6/4/2 nops|
+ * |           |6 bytes jg----------->
+ * +-----------+---------------------+
+ *              ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641:                            |6651:
+ * | alternative instr 1             |
+ * +-----------+---------+- - - - - -+
+ * |6642:                |6652:      |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ *                        ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each      |
+ * | alternative instr               |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num)	"664"#num
+#define e_altinstr(num)	"665"#num
+
+#define e_oldinstr_pad_end	"663"
+#define oldinstr_len		"662b-661b"
+#define oldinstr_total_len	e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num)	e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+	"-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+	"((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len)					\
+	".if " len " > 254\n"						\
+	"\t.error \"cpu alternatives does not support instructions "	\
+		"blocks > 254 bytes\"\n"				\
+	".endif\n"							\
+	".if (" len ") %% 2\n"						\
+	"\t.error \"cpu alternatives instructions length is odd\"\n"	\
+	".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num)					\
+	".if " oldinstr_pad_len(num) " > 6\n"				\
+	"\tjg " e_oldinstr_pad_end "f\n"				\
+	"6620:\n"							\
+	"\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+	".else\n"							\
+	"\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n"	\
+	"\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n"	\
+	".endif\n"
+
+#define OLDINSTR(oldinstr, num)						\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	OLDINSTR_PADDING(oldinstr, num)					\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2)				\
+	"661:\n\t" oldinstr "\n662:\n"					\
+	".if " altinstr_len(num1) " < " altinstr_len(num2) "\n"		\
+	OLDINSTR_PADDING(oldinstr, num2)				\
+	".else\n"							\
+	OLDINSTR_PADDING(oldinstr, num1)				\
+	".endif\n"							\
+	e_oldinstr_pad_end ":\n"					\
+	INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num)					\
+	"\t.long 661b - .\n"			/* old instruction */	\
+	"\t.long " b_altinstr(num)"b - .\n"	/* alt instruction */	\
+	"\t.word " __stringify(facility) "\n"	/* facility bit    */	\
+	"\t.byte " oldinstr_total_len "\n"	/* source len      */	\
+	"\t.byte " altinstr_len(num) "\n"	/* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num)	/* replacement */	\
+	b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n"	\
+	INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+#ifdef CONFIG_ALTERNATIVES
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr, 1)				\
+	".popsection\n"							\
+	OLDINSTR(oldinstr, 1)						\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility, 1)					\
+	".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+	".pushsection .altinstr_replacement, \"ax\"\n"			\
+	ALTINSTR_REPLACEMENT(altinstr1, 1)				\
+	ALTINSTR_REPLACEMENT(altinstr2, 2)				\
+	".popsection\n"							\
+	OLDINSTR_2(oldinstr, 1, 2)					\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY(facility1, 1)					\
+	ALTINSTR_ENTRY(facility2, 2)					\
+	".popsection\n"
+#else
+/* Alternative instructions are disabled, let's put just oldinstr in */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+	oldinstr "\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+	oldinstr "\n"
+#endif
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows to use optimized instructions even on generic binary
+ * kernels.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non barrier like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility)			\
+	asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+	asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1,	\
+				   altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
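[Sketch, not part of the diff: a minimal use of the alternative() macro defined above. The commit message says the spinlock code is the first user; from memory that patches in a "niai" hint, so treat the raw encoding and facility number below as illustrative rather than authoritative.]

    #include <asm/alternative.h>

    static inline void spinlock_yield_hint(void)
    {
            /* Emit nothing on older machines; at boot, patch in the raw
             * instruction when the named facility bit is installed. The
             * encoding/facility pair here is an assumption for the sake
             * of the example. */
            alternative("", ".long 0xb2fa0040", 49);
    }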
@@ -28,26 +28,27 @@ static void s390_arch_random_generate(u8 *buf, unsigned int nbytes)
 
 static inline bool arch_has_random(void)
 {
 	if (static_branch_likely(&s390_arch_random_available))
 		return true;
 	return false;
 }
 
 static inline bool arch_has_random_seed(void)
 {
-	return arch_has_random();
+	if (static_branch_likely(&s390_arch_random_available))
+		return true;
+	return false;
 }
 
 static inline bool arch_get_random_long(unsigned long *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		s390_arch_random_generate((u8 *)v, sizeof(*v));
 		return true;
 	}
 	return false;
 }
 
 static inline bool arch_get_random_int(unsigned int *v)
 {
 	return false;
 }
 
 static inline bool arch_get_random_seed_long(unsigned long *v)
 {
 	if (static_branch_likely(&s390_arch_random_available)) {
 		s390_arch_random_generate((u8 *)v, sizeof(*v));
@@ -56,14 +57,13 @@ static inline bool arch_get_random_int(unsigned int *v)
 	return false;
 }
 
-static inline bool arch_get_random_seed_long(unsigned long *v)
-{
-	return arch_get_random_long(v);
-}
-
 static inline bool arch_get_random_seed_int(unsigned int *v)
 {
-	return arch_get_random_int(v);
+	if (static_branch_likely(&s390_arch_random_available)) {
+		s390_arch_random_generate((u8 *)v, sizeof(*v));
+		return true;
+	}
+	return false;
 }
 
 #endif /* CONFIG_ARCH_RANDOM */
@@ -40,19 +40,24 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 #undef __ATOMIC_OPS
 #undef __ATOMIC_OP
 
-static inline void __atomic_add_const(int val, int *ptr)
-{
-	asm volatile(
-		"	asi	%[ptr],%[val]\n"
-		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
+#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)	\
+static inline void op_name(op_type val, op_type *ptr)			\
+{									\
+	asm volatile(							\
+		op_string "	%[ptr],%[val]\n"			\
+		op_barrier						\
+		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
 }
 
-static inline void __atomic64_add_const(long val, long *ptr)
-{
-	asm volatile(
-		"	agsi	%[ptr],%[val]\n"
-		: [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc");
-}
+#define __ATOMIC_CONST_OPS(op_name, op_type, op_string)			\
+	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n")		\
+	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")
+
+__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
+__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
+
+#undef __ATOMIC_CONST_OPS
+#undef __ATOMIC_CONST_OP
 
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
@@ -108,6 +113,11 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #undef __ATOMIC64_OPS
 
+#define __atomic_add_const(val, ptr)		__atomic_add(val, ptr)
+#define __atomic_add_const_barrier(val, ptr)	__atomic_add(val, ptr)
+#define __atomic64_add_const(val, ptr)		__atomic64_add(val, ptr)
+#define __atomic64_add_const_barrier(val, ptr)	__atomic64_add(val, ptr)
+
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 static inline int __atomic_cmpxchg(int *ptr, int old, int new)
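[Sketch, not part of the diff: the two functions that __ATOMIC_CONST_OPS(__atomic_add_const, int, "asi") generates, expanded by hand for illustration. The barrier variant differs only by the trailing "bcr 14,0" serialization instruction.]

    static inline void __atomic_add_const(int val, int *ptr)
    {
            asm volatile(
                    "asi    %[ptr],%[val]\n"
                    "\n"
                    : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
    }

    static inline void __atomic_add_const_barrier(int val, int *ptr)
    {
            asm volatile(
                    "asi    %[ptr],%[val]\n"
                    "bcr    14,0\n"
                    : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");
    }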
@@ -42,6 +42,7 @@ struct ccwgroup_device {
  * @thaw: undo work done in @freeze
  * @restore: callback for restoring after hibernation
  * @driver: embedded driver structure
+ * @ccw_driver: supported ccw_driver (optional)
  */
 struct ccwgroup_driver {
 	int (*setup) (struct ccwgroup_device *);
@@ -56,6 +57,7 @@ struct ccwgroup_driver {
 	int (*restore)(struct ccwgroup_device *);
 
 	struct device_driver driver;
+	struct ccw_driver *ccw_driver;
 };
 
 extern int ccwgroup_driver_register (struct ccwgroup_driver *cdriver);
@@ -2,7 +2,7 @@
 /*
  * CP Assist for Cryptographic Functions (CPACF)
  *
- * Copyright IBM Corp. 2003, 2016
+ * Copyright IBM Corp. 2003, 2017
  * Author(s): Thomas Spatzier
  *	      Jan Glauber
  *	      Harald Freudenberger (freude@de.ibm.com)
@@ -134,6 +134,22 @@
 #define CPACF_PRNO_TRNG_Q_R2C_RATIO	0x70
 #define CPACF_PRNO_TRNG			0x72
 
+/*
+ * Function codes for the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ * instruction
+ */
+#define CPACF_KMA_QUERY		0x00
+#define CPACF_KMA_GCM_AES_128	0x12
+#define CPACF_KMA_GCM_AES_192	0x13
+#define CPACF_KMA_GCM_AES_256	0x14
+
+/*
+ * Flags for the KMA (CIPHER MESSAGE WITH AUTHENTICATION) instruction
+ */
+#define CPACF_KMA_LPC	0x100	/* Last-Plaintext/Ciphertext */
+#define CPACF_KMA_LAAD	0x200	/* Last-AAD */
+#define CPACF_KMA_HS	0x400	/* Hash-subkey Supplied */
+
 typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
 
 /**
@@ -179,6 +195,8 @@ static inline int __cpacf_check_opcode(unsigned int opcode)
 		return test_facility(77);	/* check for MSA4 */
 	case CPACF_PRNO:
 		return test_facility(57);	/* check for MSA5 */
+	case CPACF_KMA:
+		return test_facility(146);	/* check for MSA8 */
 	default:
 		BUG();
 	}
@@ -470,4 +488,36 @@ static inline void cpacf_pckmo(long func, void *param)
 		: "cc", "memory");
 }
 
+/**
+ * cpacf_kma() - executes the KMA (CIPHER MESSAGE WITH AUTHENTICATION)
+ *		 instruction
+ * @func: the function code passed to KMA; see CPACF_KMA_xxx defines
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @src: address of source memory area
+ * @src_len: length of src operand in bytes
+ * @aad: address of additional authenticated data memory area
+ * @aad_len: length of aad operand in bytes
+ */
+static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
+			     const u8 *src, unsigned long src_len,
+			     const u8 *aad, unsigned long aad_len)
+{
+	register unsigned long r0 asm("0") = (unsigned long) func;
+	register unsigned long r1 asm("1") = (unsigned long) param;
+	register unsigned long r2 asm("2") = (unsigned long) src;
+	register unsigned long r3 asm("3") = (unsigned long) src_len;
+	register unsigned long r4 asm("4") = (unsigned long) aad;
+	register unsigned long r5 asm("5") = (unsigned long) aad_len;
+	register unsigned long r6 asm("6") = (unsigned long) dest;
+
+	asm volatile(
+		"0:	.insn	rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
+		"	brc	1,0b\n" /* handle partial completion */
+		: [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
+		  [aad] "+a" (r4), [alen] "+d" (r5)
+		: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
+		: "cc", "memory");
+}
+
 #endif /* _ASM_S390_CPACF_H */
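[Sketch, not part of the diff: probing KMA availability with the helpers above, mirroring what aes_s390_init() in this same series does. The function name is ours.]

    #include <asm/cpacf.h>

    static bool have_kma_gcm(void)
    {
            cpacf_mask_t kma;

            /* cpacf_query() returns nonzero when the instruction exists;
             * the mask then tells which function codes are installed. */
            if (!cpacf_query(CPACF_KMA, &kma))
                    return false;
            return cpacf_test_func(&kma, CPACF_KMA_GCM_AES_128) ||
                   cpacf_test_func(&kma, CPACF_KMA_GCM_AES_192) ||
                   cpacf_test_func(&kma, CPACF_KMA_GCM_AES_256);
    }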
@@ -8,6 +8,18 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
+#include <linux/const.h>
+
+#define CR2_GUARDED_STORAGE		_BITUL(63 - 59)
+
+#define CR14_CHANNEL_REPORT_SUBMASK	_BITUL(63 - 35)
+#define CR14_RECOVERY_SUBMASK		_BITUL(63 - 36)
+#define CR14_DEGRADATION_SUBMASK	_BITUL(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK	_BITUL(63 - 38)
+#define CR14_WARNING_SUBMASK		_BITUL(63 - 39)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bug.h>
+
 #define __ctl_load(array, low, high) do {			\
@@ -55,7 +67,11 @@ void smp_ctl_clear_bit(int cr, int bit);
 union ctlreg0 {
 	unsigned long val;
 	struct {
-		unsigned long	   : 32;
+		unsigned long	   : 8;
+		unsigned long tcx  : 1;	/* Transactional-Execution control */
+		unsigned long pifo : 1;	/* Transactional-Execution Program-
+					   Interruption-Filtering Override */
+		unsigned long	   : 22;
 		unsigned long	   : 3;
 		unsigned long lap  : 1; /* Low-address-protection control */
 		unsigned long	   : 4;
@@ -71,6 +87,19 @@ union ctlreg0 {
 	};
 };
 
+union ctlreg2 {
+	unsigned long val;
+	struct {
+		unsigned long	    : 33;
+		unsigned long ducto : 25;
+		unsigned long	    : 1;
+		unsigned long gse   : 1;
+		unsigned long	    : 1;
+		unsigned long tds   : 1;
+		unsigned long tdc   : 2;
+	};
+};
+
 #ifdef CONFIG_SMP
 # define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
 # define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
@@ -79,4 +108,5 @@ union ctlreg0 {
 # define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
 #endif
 
+#endif /* __ASSEMBLY__ */
 #endif /* __ASM_CTL_REG_H */
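[Sketch, not part of the diff: the new constants number bits Principles-of-Operation style, with bit 0 as the leftmost bit of the 64-bit register, while ctl_set_bit() counts from the right - hence the "63 -" in each definition. Enabling guarded storage in CR2 on all CPUs would then look like this (a hedged example, not taken from this series):]

    #include <asm/ctl_reg.h>

    static void gs_enable_sketch(void)
    {
            /* CR2 bit 59 in PoP numbering == bit 4 from the right,
             * i.e. the bit that CR2_GUARDED_STORAGE (_BITUL(63 - 59))
             * describes as a mask. */
            ctl_set_bit(2, 4);
    }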
@@ -14,71 +14,71 @@
 #include <linux/refcount.h>
 #include <uapi/asm/debug.h>
 
 #define DEBUG_MAX_LEVEL		6	/* debug levels range from 0 to 6 */
 #define DEBUG_OFF_LEVEL		-1	/* level where debug is switched off */
 #define DEBUG_FLUSH_ALL		-1	/* parameter to flush all areas */
 #define DEBUG_MAX_VIEWS		10	/* max number of views in proc fs */
 #define DEBUG_MAX_NAME_LEN	64	/* max length for a debugfs file name */
 #define DEBUG_DEFAULT_LEVEL	3	/* initial debug level */
 
 #define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */
 
-#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
-					     /* the entry information */
+#define DEBUG_DATA(entry) (char *)(entry + 1) /* data is stored behind */
+					      /* the entry information */
 
 typedef struct __debug_entry debug_entry_t;
 
 struct debug_view;
 
 typedef struct debug_info {
-	struct debug_info* next;
-	struct debug_info* prev;
+	struct debug_info *next;
+	struct debug_info *prev;
 	refcount_t ref_count;
 	spinlock_t lock;
 	int level;
 	int nr_areas;
 	int pages_per_area;
 	int buf_size;
 	int entry_size;
-	debug_entry_t*** areas;
+	debug_entry_t ***areas;
 	int active_area;
 	int *active_pages;
 	int *active_entries;
-	struct dentry* debugfs_root_entry;
-	struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
-	struct debug_view* views[DEBUG_MAX_VIEWS];
+	struct dentry *debugfs_root_entry;
+	struct dentry *debugfs_entries[DEBUG_MAX_VIEWS];
+	struct debug_view *views[DEBUG_MAX_VIEWS];
 	char name[DEBUG_MAX_NAME_LEN];
 	umode_t mode;
 } debug_info_t;
 
-typedef int (debug_header_proc_t) (debug_info_t* id,
-				   struct debug_view* view,
+typedef int (debug_header_proc_t) (debug_info_t *id,
+				   struct debug_view *view,
 				   int area,
-				   debug_entry_t* entry,
-				   char* out_buf);
+				   debug_entry_t *entry,
+				   char *out_buf);
 
-typedef int (debug_format_proc_t) (debug_info_t* id,
-				   struct debug_view* view, char* out_buf,
-				   const char* in_buf);
-typedef int (debug_prolog_proc_t) (debug_info_t* id,
-				   struct debug_view* view,
-				   char* out_buf);
-typedef int (debug_input_proc_t) (debug_info_t* id,
-				  struct debug_view* view,
-				  struct file* file,
+typedef int (debug_format_proc_t) (debug_info_t *id,
+				   struct debug_view *view, char *out_buf,
+				   const char *in_buf);
+typedef int (debug_prolog_proc_t) (debug_info_t *id,
+				   struct debug_view *view,
+				   char *out_buf);
+typedef int (debug_input_proc_t) (debug_info_t *id,
+				  struct debug_view *view,
+				  struct file *file,
 				  const char __user *user_buf,
-				  size_t in_buf_size, loff_t* offset);
+				  size_t in_buf_size, loff_t *offset);
 
-int debug_dflt_header_fn(debug_info_t* id, struct debug_view* view,
-			 int area, debug_entry_t* entry, char* out_buf);
+int debug_dflt_header_fn(debug_info_t *id, struct debug_view *view,
+			 int area, debug_entry_t *entry, char *out_buf);
 
 struct debug_view {
 	char name[DEBUG_MAX_NAME_LEN];
-	debug_prolog_proc_t* prolog_proc;
-	debug_header_proc_t* header_proc;
-	debug_format_proc_t* format_proc;
-	debug_input_proc_t* input_proc;
-	void* private_data;
+	debug_prolog_proc_t *prolog_proc;
+	debug_header_proc_t *header_proc;
+	debug_format_proc_t *format_proc;
+	debug_input_proc_t *input_proc;
+	void *private_data;
 };
 
 extern struct debug_view debug_hex_ascii_view;
@@ -87,65 +87,67 @@ extern struct debug_view debug_sprintf_view;
 
 /* do NOT use the _common functions */
 
-debug_entry_t* debug_event_common(debug_info_t* id, int level,
-				  const void* data, int length);
+debug_entry_t *debug_event_common(debug_info_t *id, int level,
+				  const void *data, int length);
 
-debug_entry_t* debug_exception_common(debug_info_t* id, int level,
-				      const void* data, int length);
+debug_entry_t *debug_exception_common(debug_info_t *id, int level,
+				      const void *data, int length);
 
 /* Debug Feature API: */
 
 debug_info_t *debug_register(const char *name, int pages, int nr_areas,
 			     int buf_size);
 
 debug_info_t *debug_register_mode(const char *name, int pages, int nr_areas,
				  int buf_size, umode_t mode, uid_t uid,
				  gid_t gid);
 
-void debug_unregister(debug_info_t* id);
+void debug_unregister(debug_info_t *id);
 
-void debug_set_level(debug_info_t* id, int new_level);
+void debug_set_level(debug_info_t *id, int new_level);
 
 void debug_set_critical(void);
 void debug_stop_all(void);
 
-static inline bool debug_level_enabled(debug_info_t* id, int level)
+static inline bool debug_level_enabled(debug_info_t *id, int level)
 {
 	return level <= id->level;
 }
 
-static inline debug_entry_t*
-debug_event(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_event(debug_info_t *id, int level,
+					 void *data, int length)
 {
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_event_common(id,level,data,length);
+	return debug_event_common(id, level, data, length);
 }
 
-static inline debug_entry_t*
-debug_int_event(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_event(debug_info_t *id, int level,
+					     unsigned int tag)
 {
-	unsigned int t=tag;
+	unsigned int t = tag;
+
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_event_common(id,level,&t,sizeof(unsigned int));
+	return debug_event_common(id, level, &t, sizeof(unsigned int));
 }
 
-static inline debug_entry_t *
-debug_long_event (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_event(debug_info_t *id, int level,
					      unsigned long tag)
 {
-	unsigned long t=tag;
+	unsigned long t = tag;
+
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_event_common(id,level,&t,sizeof(unsigned long));
+	return debug_event_common(id, level, &t, sizeof(unsigned long));
 }
 
-static inline debug_entry_t*
-debug_text_event(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
					      const char *txt)
 {
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_event_common(id,level,txt,strlen(txt));
+	return debug_event_common(id, level, txt, strlen(txt));
 }
 
 /*
@@ -161,6 +163,7 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 	debug_entry_t *__ret;						\
 	debug_info_t *__id = _id;					\
 	int __level = _level;						\
+									\
 	if ((!__id) || (__level > __id->level))				\
 		__ret = NULL;						\
 	else								\
@@ -169,38 +172,40 @@ __debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 	__ret;								\
 })
 
-static inline debug_entry_t*
-debug_exception(debug_info_t* id, int level, void* data, int length)
+static inline debug_entry_t *debug_exception(debug_info_t *id, int level,
					     void *data, int length)
 {
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_exception_common(id,level,data,length);
+	return debug_exception_common(id, level, data, length);
 }
 
-static inline debug_entry_t*
-debug_int_exception(debug_info_t* id, int level, unsigned int tag)
+static inline debug_entry_t *debug_int_exception(debug_info_t *id, int level,
						 unsigned int tag)
 {
-	unsigned int t=tag;
+	unsigned int t = tag;
+
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_exception_common(id,level,&t,sizeof(unsigned int));
+	return debug_exception_common(id, level, &t, sizeof(unsigned int));
 }
 
-static inline debug_entry_t *
-debug_long_exception (debug_info_t* id, int level, unsigned long tag)
+static inline debug_entry_t *debug_long_exception (debug_info_t *id, int level,
						   unsigned long tag)
 {
-	unsigned long t=tag;
+	unsigned long t = tag;
+
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_exception_common(id,level,&t,sizeof(unsigned long));
+	return debug_exception_common(id, level, &t, sizeof(unsigned long));
 }
 
-static inline debug_entry_t*
-debug_text_exception(debug_info_t* id, int level, const char* txt)
+static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
						  const char *txt)
 {
 	if ((!id) || (level > id->level) || (id->pages_per_area == 0))
 		return NULL;
-	return debug_exception_common(id,level,txt,strlen(txt));
+	return debug_exception_common(id, level, txt, strlen(txt));
 }
 
 /*
@@ -216,6 +221,7 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 	debug_entry_t *__ret;						\
 	debug_info_t *__id = _id;					\
 	int __level = _level;						\
+									\
 	if ((!__id) || (__level > __id->level))				\
 		__ret = NULL;						\
 	else								\
@@ -224,13 +230,13 @@ __debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 	__ret;								\
 })
 
-int debug_register_view(debug_info_t* id, struct debug_view* view);
-int debug_unregister_view(debug_info_t* id, struct debug_view* view);
+int debug_register_view(debug_info_t *id, struct debug_view *view);
+int debug_unregister_view(debug_info_t *id, struct debug_view *view);
 
 /*
    define the debug levels:
   - 0 No debugging output to console or syslog
   - 1 Log internal errors to syslog, ignore check conditions
   - 2 Log internal errors and check conditions to syslog
   - 3 Log internal errors to console, log check conditions to syslog
   - 4 Log internal errors and check conditions to console
@@ -248,17 +254,17 @@ int debug_unregister_view(debug_info_t *id, struct debug_view *view);
 #define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
 
 #if DEBUG_LEVEL > 0
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_INFO PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_WARNING PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_ERR PRINTK_HEADER x )
-#define PRINT_FATAL(x...) panic ( PRINTK_HEADER x )
+#define PRINT_DEBUG(x...)	printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...)	printk(KERN_INFO PRINTK_HEADER x)
+#define PRINT_WARN(x...)	printk(KERN_WARNING PRINTK_HEADER x)
+#define PRINT_ERR(x...)		printk(KERN_ERR PRINTK_HEADER x)
+#define PRINT_FATAL(x...)	panic(PRINTK_HEADER x)
 #else
-#define PRINT_DEBUG(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_INFO(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_WARN(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_ERR(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
-#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
+#define PRINT_DEBUG(x...)	printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_INFO(x...)	printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_WARN(x...)	printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_ERR(x...)		printk(KERN_DEBUG PRINTK_HEADER x)
+#define PRINT_FATAL(x...)	printk(KERN_DEBUG PRINTK_HEADER x)
 #endif /* DASD_DEBUG */
 
 #endif /* DEBUG_H */
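[Sketch, not part of the diff: typical use of the s390 debug feature whose prototypes were restyled above, following the documented debug_register()/debug_event() flow. The driver name and sizes are arbitrary.]

    #include <asm/debug.h>

    static debug_info_t *mydrv_dbf;

    static int __init mydrv_init(void)
    {
            /* 4 pages per area, 1 area, up to 16 data bytes per entry */
            mydrv_dbf = debug_register("mydrv", 4, 1, 16);
            if (!mydrv_dbf)
                    return -ENOMEM;
            debug_register_view(mydrv_dbf, &debug_hex_ascii_view);
            debug_set_level(mydrv_dbf, 3);
            debug_text_event(mydrv_dbf, 1, "init done");
            return 0;
    }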
@@ -9,32 +9,7 @@
 #ifndef __ASM_S390_DIS_H__
 #define __ASM_S390_DIS_H__
 
-/* Type of operand */
-#define OPERAND_GPR	0x1	/* Operand printed as %rx */
-#define OPERAND_FPR	0x2	/* Operand printed as %fx */
-#define OPERAND_AR	0x4	/* Operand printed as %ax */
-#define OPERAND_CR	0x8	/* Operand printed as %cx */
-#define OPERAND_VR	0x10	/* Operand printed as %vx */
-#define OPERAND_DISP	0x20	/* Operand printed as displacement */
-#define OPERAND_BASE	0x40	/* Operand printed as base register */
-#define OPERAND_INDEX	0x80	/* Operand printed as index register */
-#define OPERAND_PCREL	0x100	/* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED	0x200	/* Operand printed as signed value */
-#define OPERAND_LENGTH	0x400	/* Operand printed as length (+1) */
-
-
-struct s390_operand {
-	int bits;		/* The number of bits in the operand. */
-	int shift;		/* The number of bits to shift. */
-	int flags;		/* One bit syntax flags. */
-};
-
-struct s390_insn {
-	const char name[5];
-	unsigned char opfrag;
-	unsigned char format;
-};
+#include <generated/dis.h>
 
 static inline int insn_length(unsigned char code)
 {
@@ -45,7 +20,6 @@ struct pt_regs;
 
 void show_code(struct pt_regs *regs);
 void print_fn_code(unsigned char *code, unsigned long len);
-int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
 struct s390_insn *find_insn(unsigned char *code);
 
 static inline int is_known_insn(unsigned char *code)
@@ -13,6 +13,8 @@
 #include <asm/cio.h>
 #include <asm/setup.h>
 
+#define NSS_NAME_SIZE	8
+
 #define IPL_PARMBLOCK_ORIGIN	0x2000
 
 #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \
@@ -106,7 +108,6 @@ extern size_t append_ipl_scpdata(char *, size_t);
 enum {
 	IPL_DEVNO_VALID		= 1,
 	IPL_PARMBLOCK_VALID	= 2,
-	IPL_NSS_VALID		= 4,
 };
 
 enum ipl_type {
@@ -63,8 +63,6 @@ typedef u16 kprobe_opcode_t;
 
 #define kretprobe_blacklist_size 0
 
-#define KPROBE_SWAP_INST	0x10
-
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
@@ -736,7 +736,6 @@ struct kvm_arch{
 	wait_queue_head_t ipte_wq;
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
-	struct ratelimit_state sthyi_limit;
 	spinlock_t start_stop_lock;
 	struct sie_page2 *sie_page2;
 	struct kvm_s390_cpu_model model;
@@ -134,8 +134,9 @@ struct lowcore {
 	__u8	pad_0x03b4[0x03b8-0x03b4];	/* 0x03b4 */
 	__u64	gmap;				/* 0x03b8 */
 	__u32	spinlock_lockval;		/* 0x03c0 */
-	__u32	fpu_flags;			/* 0x03c4 */
-	__u8	pad_0x03c8[0x0400-0x03c8];	/* 0x03c8 */
+	__u32	spinlock_index;			/* 0x03c4 */
+	__u32	fpu_flags;			/* 0x03c8 */
+	__u8	pad_0x03cc[0x0400-0x03cc];	/* 0x03cc */
 
 	/* Per cpu primary space access list */
 	__u32	paste[16];			/* 0x0400 */
@@ -26,12 +26,9 @@
#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)

#define MCCK_CR14_CR_PENDING_SUB_MASK (1 << 28)
#define MCCK_CR14_RECOVERY_SUB_MASK (1 << 27)
#define MCCK_CR14_DEGRAD_SUB_MASK (1 << 26)
#define MCCK_CR14_EXT_DAMAGE_SUB_MASK (1 << 25)
#define MCCK_CR14_WARN_SUB_MASK (1 << 24)
#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
#define MCCK_CODE_FC_VALID _BITUL(63 - 43)

#ifndef __ASSEMBLY__

@@ -87,6 +84,8 @@ union mci {

#define MCESA_ORIGIN_MASK (~0x3ffUL)
#define MCESA_LC_MASK (0xfUL)
#define MCESA_MIN_SIZE (1024)
#define MCESA_MAX_SIZE (2048)

struct mcesa {
u8 vector_save_area[1024];
@@ -95,8 +94,12 @@ struct mcesa {

struct pt_regs;

extern void s390_handle_mcck(void);
extern void s390_do_machine_check(struct pt_regs *regs);
void nmi_alloc_boot_cpu(struct lowcore *lc);
int nmi_alloc_per_cpu(struct lowcore *lc);
void nmi_free_per_cpu(struct lowcore *lc);

void s390_handle_mcck(void);
void s390_do_machine_check(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NMI_H */
@@ -19,11 +19,7 @@ extern debug_info_t *pci_debug_err_id;

static inline void zpci_err_hex(void *addr, int len)
{
while (len > 0) {
debug_event(pci_debug_err_id, 0, (void *) addr, len);
len -= pci_debug_err_id->buf_size;
addr += pci_debug_err_id->buf_size;
}
debug_event(pci_debug_err_id, 0, addr, len);
}

#endif
@@ -82,6 +82,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
int zpci_load(u64 *data, u64 req, u64 offset);
int zpci_store(u64 data, u64 req, u64 offset);
int zpci_store_block(const u64 *data, u64 req, u64 offset);
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);

#endif
@@ -13,6 +13,7 @@
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

@@ -28,24 +29,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
struct addrtype { char _[256]; };
int i;

for (i = 0; i < n; i += 256) {
*s = val;
asm volatile(
"mvc 8(248,%[s]),0(%[s])\n"
: "+m" (*(struct addrtype *) s)
: [s] "a" (s));
s += 256 / sizeof(long);
}
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
clear_table(crst, entry, _CRST_TABLE_SIZE);
memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
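
The removed clear_table() stored the entry once and let an overlapping mvc propagate it through each 256-byte block; the replacement simply calls memset64() with an entry count. A portable sketch of the propagation idea, for illustration only (the real mvc copies 248 bytes from offset 0 to offset 8 within a block):

#include <stdint.h>
#include <assert.h>
#include <stddef.h>

/* Propagate an 8-byte pattern through a table by overlapping copy,
 * byte-by-byte left to right, which is how mvc behaves on overlap
 * (a plain memcpy would be undefined here). */
static void sketch_propagate(uint64_t *table, uint64_t entry, size_t n)
{
	unsigned char *p = (unsigned char *) table;
	size_t i;

	table[0] = entry;
	for (i = 8; i < n * 8; i++)
		p[i] = p[i - 8];
}

int main(void)
{
	uint64_t tab[32];

	sketch_propagate(tab, 0x20UL, 32);
	assert(tab[31] == 0x20UL);	/* every slot holds the entry */
	return 0;
}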
@@ -22,6 +22,7 @@
#define CIF_IGNORE_IRQ 5 /* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT 6 /* in enabled wait state */
#define CIF_MCCK_GUEST 7 /* machine check happening in guest */
#define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */

#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
@@ -31,6 +32,7 @@
#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)

#ifndef __ASSEMBLY__

@@ -219,10 +221,10 @@ void show_registers(struct pt_regs *regs);
void show_cacheinfo(struct seq_file *m);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
static inline void release_thread(struct task_struct *tsk) { }

/* Free guarded storage control block for current */
void exit_thread_gs(void);
/* Free guarded storage control block */
void guarded_storage_release(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \
@@ -6,55 +6,55 @@
#define S390_RUNTIME_INSTR_STOP 0x2

struct runtime_instr_cb {
__u64 buf_current;
__u64 buf_origin;
__u64 buf_limit;
__u64 rca;
__u64 roa;
__u64 rla;

__u32 valid : 1;
__u32 pstate : 1;
__u32 pstate_set_buf : 1;
__u32 home_space : 1;
__u32 altered : 1;
__u32 : 3;
__u32 pstate_sample : 1;
__u32 sstate_sample : 1;
__u32 pstate_collect : 1;
__u32 sstate_collect : 1;
__u32 : 1;
__u32 halted_int : 1;
__u32 int_requested : 1;
__u32 buffer_full_int : 1;
__u32 v : 1;
__u32 s : 1;
__u32 k : 1;
__u32 h : 1;
__u32 a : 1;
__u32 reserved1 : 3;
__u32 ps : 1;
__u32 qs : 1;
__u32 pc : 1;
__u32 qc : 1;
__u32 reserved2 : 1;
__u32 g : 1;
__u32 u : 1;
__u32 l : 1;
__u32 key : 4;
__u32 : 9;
__u32 reserved3 : 8;
__u32 t : 1;
__u32 rgs : 3;

__u32 mode : 4;
__u32 next : 1;
__u32 m : 4;
__u32 n : 1;
__u32 mae : 1;
__u32 : 2;
__u32 call_type_br : 1;
__u32 return_type_br : 1;
__u32 other_type_br : 1;
__u32 bc_other_type : 1;
__u32 emit : 1;
__u32 tx_abort : 1;
__u32 : 2;
__u32 bp_xn : 1;
__u32 bp_xt : 1;
__u32 bp_ti : 1;
__u32 bp_ni : 1;
__u32 suppr_y : 1;
__u32 suppr_z : 1;
__u32 reserved4 : 2;
__u32 c : 1;
__u32 r : 1;
__u32 b : 1;
__u32 j : 1;
__u32 e : 1;
__u32 x : 1;
__u32 reserved5 : 2;
__u32 bpxn : 1;
__u32 bpxt : 1;
__u32 bpti : 1;
__u32 bpni : 1;
__u32 reserved6 : 2;

__u32 dc_miss_extra : 1;
__u32 lat_lev_ignore : 1;
__u32 ic_lat_lev : 4;
__u32 dc_lat_lev : 4;
__u32 d : 1;
__u32 f : 1;
__u32 ic : 4;
__u32 dc : 4;

__u64 reserved1;
__u64 scaling_factor;
__u64 reserved7;
__u64 sf;
__u64 rsic;
__u64 reserved2;
__u64 reserved8;
} __packed __aligned(8);

extern struct runtime_instr_cb runtime_instr_empty_cb;
@@ -86,6 +86,8 @@ static inline void restore_ri_cb(struct runtime_instr_cb *cb_next,
load_runtime_instr_cb(&runtime_instr_empty_cb);
}

void exit_thread_runtime_instr(void);
struct task_struct;

void runtime_instr_release(struct task_struct *tsk);

#endif /* _RUNTIME_INSTR_H */
@@ -1,211 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 * S390 version
 * Copyright IBM Corp. 2002
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 *
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe) readers waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
signed long old, new;

asm volatile(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
if (old < 0)
rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
signed long old, new;

asm volatile(
" lg %0,%2\n"
"0: ltgr %1,%0\n"
" jm 1f\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b\n"
"1:"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline long ___down_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;

tmp = RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");

return old;
}

static inline void __down_write(struct rw_semaphore *sem)
{
if (___down_write(sem))
rwsem_down_write_failed(sem);
}

static inline int __down_write_killable(struct rw_semaphore *sem)
{
if (___down_write(sem))
if (IS_ERR(rwsem_down_write_failed_killable(sem)))
return -EINTR;

return 0;
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
signed long old;

asm volatile(
" lg %0,%1\n"
"0: ltgr %0,%0\n"
" jnz 1f\n"
" csg %0,%3,%1\n"
" jl 0b\n"
"1:"
: "=&d" (old), "=Q" (sem->count)
: "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
: "cc", "memory");
return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
signed long old, new;

asm volatile(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" aghi %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory");
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;

tmp = -RWSEM_ACTIVE_WRITE_BIAS;
asm volatile(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;

tmp = -RWSEM_WAITING_BIAS;
asm volatile(
" lg %0,%2\n"
"0: lgr %1,%0\n"
" ag %1,%4\n"
" csg %0,%1,%2\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "=Q" (sem->count)
: "Q" (sem->count), "m" (tmp)
: "cc", "memory");
if (new > 1)
rwsem_downgrade_wake(sem);
}

#endif /* _S390_RWSEM_H */
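
The deleted fast paths above all share one shape: load the count, compute the new value, and retry the compare-and-swap (csg) until it sticks. A C sketch of that retry loop using standard atomics, for readers who don't speak s390 assembly (the names are illustrative, not from the kernel):

#include <stdatomic.h>

/* Reader fast path of the removed __down_read(), restated in C:
 * atomically add a bias via a compare-and-swap retry loop. */
static long sketch_rwsem_add(_Atomic long *count, long bias)
{
	long old = atomic_load(count);

	while (!atomic_compare_exchange_weak(count, &old, old + bias))
		;	/* old was reloaded by the failed CAS; retry */
	return old;	/* caller inspects the old value for contention */
}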
@@ -4,6 +4,6 @@

#include <asm-generic/sections.h>

extern char _eshared[], _ehead[];
extern char _ehead[];

#endif
@@ -98,9 +98,6 @@ extern char vmpoff_cmd[];
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)

#define NSS_NAME_SIZE 8
extern char kernel_nss_name[];

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
@@ -28,6 +28,7 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

extern void smp_call_online_cpu(void (*func)(void *), void *);
extern void smp_call_ipl_cpu(void (*func)(void *), void *);
extern void smp_emergency_stop(void);

extern int smp_find_processor_id(u16 address);
extern int smp_store_status(int cpu);
@@ -53,6 +54,10 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
func(data);
}

static inline void smp_emergency_stop(void)
{
}

static inline int smp_find_processor_id(u16 address) { return 0; }
static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
@@ -14,6 +14,7 @@
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

@@ -36,20 +37,15 @@ bool arch_vcpu_is_preempted(int cpu);
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);
void arch_spin_relax(arch_spinlock_t *lock);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
arch_lock_relax(lock->lock);
}
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
return ~cpu;
return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@ -65,8 +61,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
return likely(arch_spin_value_unlocked(*lp) &&
__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -79,7 +74,7 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
unsigned long flags)
{
if (!arch_spin_trylock_once(lp))
arch_spin_lock_wait_flags(lp, flags);
arch_spin_lock_wait(lp);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
@@ -93,11 +88,10 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
" .long 0xb2fa0070\n" /* NIAI 7 */
#endif
" st %1,%0\n"
: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
" sth %1,%0\n"
: "=Q" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
}

/*
@@ -115,164 +109,63 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
#define arch_read_can_lock(x) (((x)->cnts & 0xffff0000) == 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
#define arch_write_can_lock(x) ((x)->cnts == 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
int old = ACCESS_ONCE(rw->lock);
return likely(old >= 0 &&
__atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
int old = ACCESS_ONCE(rw->lock);
return likely(old == 0 &&
__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR "lao"
#define __RAW_OP_AND "lan"
#define __RAW_OP_ADD "laa"

#define __RAW_LOCK(ptr, op_val, op_string) \
({ \
int old_val; \
\
typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
"bcr 14,0\n" \
: "=d" (old_val), "+Q" (*ptr) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})

#define __RAW_UNLOCK(ptr, op_val, op_string) \
({ \
int old_val; \
\
typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
: "=d" (old_val), "+Q" (*ptr) \
: "d" (op_val) \
: "cc", "memory"); \
old_val; \
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
int old;

old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
if (old < 0)
_raw_read_lock_wait(rw);
old = __atomic_add(1, &rw->cnts);
if (old & 0xffff0000)
arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
__atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
int old;

old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
if (old != 0)
_raw_write_lock_wait(rw, old);
rw->owner = SPINLOCK_LOCKVAL;
if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
rw->owner = 0;
__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
__atomic_add_barrier(-0x30000, &rw->cnts);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
int old;

do {
old = ACCESS_ONCE(rw->lock);
} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
if (!arch_write_trylock_once(rw))
_raw_write_lock_wait(rw);
rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
typecheck(int, rw->lock);

rw->owner = 0;
asm volatile(
"st %1,%0\n"
: "+Q" (rw->lock)
: "d" (0)
: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
if (!arch_read_trylock_once(rw))
return _raw_read_trylock_retry(rw);
return 1;
int old;

old = READ_ONCE(rw->cnts);
return (!(old & 0xffff0000) &&
__atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
return 0;
rw->owner = SPINLOCK_LOCKVAL;
return 1;
}
int old;

static inline void arch_read_relax(arch_rwlock_t *rw)
{
arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
arch_lock_relax(rw->owner);
old = READ_ONCE(rw->cnts);
return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */
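
The new rwlock packs everything into a single cnts word: the low 16 bits count readers, and a writer claims the lock by installing 0x30000 above the reader field via compare-and-swap. A hedged sketch of that encoding using standard atomics, with illustrative names, outside the kernel:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RW_WRITER	0x30000u	/* value a writer installs */
#define RW_WRITER_MASK	0xffff0000u	/* low 16 bits count readers */

/* Reader trylock: bump the reader count in the low 16 bits, but only
 * while no writer bits are set (cf. arch_read_trylock above). */
static bool sketch_read_trylock(_Atomic uint32_t *cnts)
{
	uint32_t old = atomic_load(cnts);

	return !(old & RW_WRITER_MASK) &&
	       atomic_compare_exchange_strong(cnts, &old, old + 1);
}

/* Writer trylock: the whole word must be zero, i.e. no readers and
 * no writer (cf. arch_write_trylock above). */
static bool sketch_write_trylock(_Atomic uint32_t *cnts)
{
	uint32_t old = 0;

	return atomic_compare_exchange_strong(cnts, &old, RW_WRITER);
}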
@@ -13,8 +13,8 @@ typedef struct {
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }

typedef struct {
int lock;
int owner;
int cnts;
arch_spinlock_t wait;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED { 0 }
@@ -18,6 +18,9 @@
#define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSCAN /* inline & arch function */
#define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */
#define __HAVE_ARCH_MEMSET16 /* arch function */
#define __HAVE_ARCH_MEMSET32 /* arch function */
#define __HAVE_ARCH_MEMSET64 /* arch function */
#define __HAVE_ARCH_STRCAT /* inline & arch function */
#define __HAVE_ARCH_STRCMP /* arch function */
#define __HAVE_ARCH_STRCPY /* inline & arch function */
@@ -31,17 +34,17 @@
#define __HAVE_ARCH_STRSTR /* arch function */

/* Prototypes for non-inlined arch strings functions. */
extern int memcmp(const void *, const void *, size_t);
extern void *memcpy(void *, const void *, size_t);
extern void *memset(void *, int, size_t);
extern void *memmove(void *, const void *, size_t);
extern int strcmp(const char *,const char *);
extern size_t strlcat(char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
extern char *strncat(char *, const char *, size_t);
extern char *strncpy(char *, const char *, size_t);
extern char *strrchr(const char *, int);
extern char *strstr(const char *, const char *);
int memcmp(const void *s1, const void *s2, size_t n);
void *memcpy(void *dest, const void *src, size_t n);
void *memset(void *s, int c, size_t n);
void *memmove(void *dest, const void *src, size_t n);
int strcmp(const char *s1, const char *s2);
size_t strlcat(char *dest, const char *src, size_t n);
size_t strlcpy(char *dest, const char *src, size_t size);
char *strncat(char *dest, const char *src, size_t n);
char *strncpy(char *dest, const char *src, size_t n);
char *strrchr(const char *s, int c);
char *strstr(const char *s1, const char *s2);

#undef __HAVE_ARCH_STRCHR
#undef __HAVE_ARCH_STRNCHR
@@ -50,7 +53,26 @@ extern char *strstr(const char *, const char *);
#undef __HAVE_ARCH_STRSEP
#undef __HAVE_ARCH_STRSPN

#if !defined(IN_ARCH_STRING_C)
void *__memset16(uint16_t *s, uint16_t v, size_t count);
void *__memset32(uint32_t *s, uint32_t v, size_t count);
void *__memset64(uint64_t *s, uint64_t v, size_t count);

static inline void *memset16(uint16_t *s, uint16_t v, size_t count)
{
return __memset16(s, v, count * sizeof(v));
}

static inline void *memset32(uint32_t *s, uint32_t v, size_t count)
{
return __memset32(s, v, count * sizeof(v));
}

static inline void *memset64(uint64_t *s, uint64_t v, size_t count)
{
return __memset64(s, v, count * sizeof(v));
}

#if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY))

static inline void *memchr(const void * s, int c, size_t n)
{
|
||||
save_ri_cb(prev->thread.ri_cb); \
|
||||
save_gs_cb(prev->thread.gs_cb); \
|
||||
} \
|
||||
update_cr_regs(next); \
|
||||
if (next->mm) { \
|
||||
update_cr_regs(next); \
|
||||
set_cpu_flag(CIF_FPU); \
|
||||
restore_access_regs(&next->thread.acrs[0]); \
|
||||
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
|
||||
|
@ -156,7 +156,8 @@ static inline unsigned char topology_mnest_limit(void)
|
||||
struct topology_core {
|
||||
unsigned char nl;
|
||||
unsigned char reserved0[3];
|
||||
unsigned char :6;
|
||||
unsigned char :5;
|
||||
unsigned char d:1;
|
||||
unsigned char pp:2;
|
||||
unsigned char reserved1;
|
||||
unsigned short origin;
|
||||
@ -198,4 +199,5 @@ struct service_level {
|
||||
int register_service_level(struct service_level *);
|
||||
int unregister_service_level(struct service_level *);
|
||||
|
||||
int sthyi_fill(void *dst, u64 *rc);
|
||||
#endif /* __ASM_S390_SYSINFO_H */
|
||||
|
@ -17,6 +17,7 @@ struct cpu_topology_s390 {
|
||||
unsigned short book_id;
|
||||
unsigned short drawer_id;
|
||||
unsigned short node_id;
|
||||
unsigned short dedicated : 1;
|
||||
cpumask_t thread_mask;
|
||||
cpumask_t core_mask;
|
||||
cpumask_t book_mask;
|
||||
@ -35,6 +36,7 @@ extern cpumask_t cpus_with_topology;
|
||||
#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
|
||||
#define topology_drawer_id(cpu) (cpu_topology[cpu].drawer_id)
|
||||
#define topology_drawer_cpumask(cpu) (&cpu_topology[cpu].drawer_mask)
|
||||
#define topology_cpu_dedicated(cpu) (cpu_topology[cpu].dedicated)
|
||||
|
||||
#define mc_capable() 1
|
||||
|
||||
|
@ -47,6 +47,7 @@ struct vdso_per_cpu_data {
|
||||
|
||||
extern struct vdso_data *vdso_data;
|
||||
|
||||
void vdso_alloc_boot_cpu(struct lowcore *lowcore);
|
||||
int vdso_alloc_per_cpu(struct lowcore *lowcore);
|
||||
void vdso_free_per_cpu(struct lowcore *lowcore);
|
||||
|
||||
|
@@ -1,65 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * definition for virtio for kvm on s390
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
 */

#ifndef __KVM_S390_VIRTIO_H
#define __KVM_S390_VIRTIO_H

#include <linux/types.h>

struct kvm_device_desc {
/* The device type: console, network, disk etc. Type 0 terminates. */
__u8 type;
/* The number of virtqueues (first in config array) */
__u8 num_vq;
/*
 * The number of bytes of feature bits. Multiply by 2: one for host
 * features and one for guest acknowledgements.
 */
__u8 feature_len;
/* The number of bytes of the config array after virtqueues. */
__u8 config_len;
/* A status byte, written by the Guest. */
__u8 status;
__u8 config[0];
};

/*
 * This is how we expect the device configuration field for a virtqueue
 * to be laid out in config space.
 */
struct kvm_vqconfig {
/* The token returned with an interrupt. Set by the guest */
__u64 token;
/* The address of the virtio ring */
__u64 address;
/* The number of entries in the virtio_ring */
__u16 num;
};

#define KVM_S390_VIRTIO_NOTIFY 0
#define KVM_S390_VIRTIO_RESET 1
#define KVM_S390_VIRTIO_SET_STATUS 2

/* The alignment to use between consumer and producer parts of vring.
 * This is pagesize for historical reasons. */
#define KVM_S390_VIRTIO_RING_ALIGN 4096

/* These values are supposed to be in ext_params on an interrupt */
#define VIRTIO_PARAM_MASK 0xff
#define VIRTIO_PARAM_VRING_INTERRUPT 0x0
#define VIRTIO_PARAM_CONFIG_CHANGED 0x1
#define VIRTIO_PARAM_DEV_ADD 0x2

#endif

arch/s390/include/uapi/asm/sthyi.h (new file, 6 lines)
@@ -0,0 +1,6 @@
#ifndef _UAPI_ASM_STHYI_H
#define _UAPI_ASM_STHYI_H

#define STHYI_FC_CP_IFL_CAP 0

#endif /* _UAPI_ASM_STHYI_H */
@@ -316,7 +316,8 @@
#define __NR_pwritev2 377
#define __NR_s390_guarded_storage 378
#define __NR_statx 379
#define NR_syscalls 380
#define __NR_s390_sthyi 380
#define NR_syscalls 381

/*
 * There are some system calls that are not present on 64 bit, some
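
With __NR_s390_sthyi defined as 380, user space can reach the emulated instruction through syscall(2). A hedged sketch (the function code follows the uapi header added above; the 4 KB aligned response buffer and the minimal error handling are assumptions of this example, not part of the diff):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_s390_sthyi	380
#define STHYI_FC_CP_IFL_CAP	0

int main(void)
{
	unsigned long long rc = 0;
	void *buf;

	/* sthyi stores its response block into one page. */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (syscall(__NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, buf, &rc, 0))
		perror("s390_sthyi");
	free(buf);
	return 0;
}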
@@ -34,6 +34,8 @@ AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif

CFLAGS_als.o += -D__NO_FORTIFY

#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#
@@ -56,7 +58,7 @@ obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o

extra-y += head.o head64.o vmlinux.lds
@@ -75,6 +77,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_ALTERNATIVES) += alternative.o

obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o

arch/s390/kernel/alternative.c (new file, 110 lines)
@@ -0,0 +1,110 @@
#include <linux/module.h>
#include <asm/alternative.h>
#include <asm/facility.h>

#define MAX_PATCH_LEN (255 - 1)

static int __initdata_or_module alt_instr_disabled;

static int __init disable_alternative_instructions(char *str)
{
alt_instr_disabled = 1;
return 0;
}

early_param("noaltinstr", disable_alternative_instructions);

struct brcl_insn {
u16 opc;
s32 disp;
} __packed;

static u16 __initdata_or_module nop16 = 0x0700;
static u32 __initdata_or_module nop32 = 0x47000000;
static struct brcl_insn __initdata_or_module nop48 = {
0xc004, 0
};

static const void *nops[] __initdata_or_module = {
&nop16,
&nop32,
&nop48
};

static void __init_or_module add_jump_padding(void *insns, unsigned int len)
{
struct brcl_insn brcl = {
0xc0f4,
len / 2
};

memcpy(insns, &brcl, sizeof(brcl));
insns += sizeof(brcl);
len -= sizeof(brcl);

while (len > 0) {
memcpy(insns, &nop16, 2);
insns += 2;
len -= 2;
}
}

static void __init_or_module add_padding(void *insns, unsigned int len)
{
if (len > 6)
add_jump_padding(insns, len);
else if (len >= 2)
memcpy(insns, nops[len / 2 - 1], len);
}

static void __init_or_module __apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
struct alt_instr *a;
u8 *instr, *replacement;
u8 insnbuf[MAX_PATCH_LEN];

/*
 * The scan order should be from start to end. A later scanned
 * alternative code can overwrite previously scanned alternative code.
 */
for (a = start; a < end; a++) {
int insnbuf_sz = 0;

instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;

if (!test_facility(a->facility))
continue;

if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
WARN_ONCE(1, "cpu alternatives instructions length is "
"odd, skipping patching\n");
continue;
}

memcpy(insnbuf, replacement, a->replacementlen);
insnbuf_sz = a->replacementlen;

if (a->instrlen > a->replacementlen) {
add_padding(insnbuf + a->replacementlen,
a->instrlen - a->replacementlen);
insnbuf_sz += a->instrlen - a->replacementlen;
}

s390_kernel_write(instr, insnbuf, insnbuf_sz);
}
}

void __init_or_module apply_alternatives(struct alt_instr *start,
struct alt_instr *end)
{
if (!alt_instr_disabled)
__apply_alternatives(start, end);
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
void __init apply_alternative_instructions(void)
{
apply_alternatives(__alt_instructions, __alt_instructions_end);
}
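
__apply_alternatives() walks an array of alt_instr records, each holding self-relative offsets to the original and replacement code plus a facility number. The struct itself lives in asm/alternative.h, which is not part of this page; the layout below is a sketch inferred from the accesses in the code above, so treat field order and sizes as assumptions:

#include <stdint.h>

/* Record layout implied by __apply_alternatives(); the authoritative
 * definition is in arch/s390/include/asm/alternative.h (not shown). */
struct alt_instr_sketch {
	int32_t instr_offset;	/* original site, self-relative */
	int32_t repl_offset;	/* replacement code, self-relative */
	uint16_t facility;	/* patch only if test_facility(facility) */
	uint8_t instrlen;	/* original length, a multiple of 2 */
	uint8_t replacementlen;	/* replacement length, a multiple of 2 */
};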
@@ -14,6 +14,7 @@
#include <asm/vdso.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>

/*
 * Make sure that the compiler is new enough. We want a compiler that
@@ -159,6 +160,7 @@ int main(void)
OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock);
OFFSET(__LC_INT_CLOCK, lowcore, int_clock);
OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock);
OFFSET(__LC_CLOCK_COMPARATOR, lowcore, clock_comparator);
OFFSET(__LC_BOOT_CLOCK, lowcore, boot_clock);
OFFSET(__LC_CURRENT, lowcore, current_task);
OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack);
@@ -194,6 +196,9 @@ int main(void)
OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area);
OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb);
BLANK();
/* extended machine check save area */
OFFSET(__MCESA_GS_SAVE_AREA, mcesa, guarded_storage_save_area);
BLANK();
/* gmap/sie offsets */
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
@@ -181,3 +181,4 @@ COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);

File diff suppressed because it is too large
@@ -31,14 +31,6 @@
#include <asm/facility.h>
#include "entry.h"

/*
 * Create a Kernel NSS if the SAVESYS= parameter is defined
 */
#define DEFSYS_CMD_SIZE 128
#define SAVESYS_CMD_SIZE 32

char kernel_nss_name[NSS_NAME_SIZE + 1];

static void __init setup_boot_command_line(void);

/*
@@ -59,134 +51,6 @@ static void __init reset_tod_clock(void)
S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);

asm(
" .section .init.text,\"ax\",@progbits\n"
" .align 4\n"
" .type savesys_ipl_nss, @function\n"
"savesys_ipl_nss:\n"
" stmg 6,15,48(15)\n"
" lgr 14,3\n"
" sam31\n"
" diag 2,14,0x8\n"
" sam64\n"
" lgr 2,14\n"
" lmg 6,15,48(15)\n"
" br 14\n"
" .size savesys_ipl_nss, .-savesys_ipl_nss\n"
" .previous\n");

static __initdata char upper_command_line[COMMAND_LINE_SIZE];

static noinline __init void create_kernel_nss(void)
{
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
unsigned int sinitrd_pfn, einitrd_pfn;
#endif
int response;
int hlen;
size_t len;
char *savesys_ptr;
char defsys_cmd[DEFSYS_CMD_SIZE];
char savesys_cmd[SAVESYS_CMD_SIZE];

/* Do nothing if we are not running under VM */
if (!MACHINE_IS_VM)
return;

/* Convert COMMAND_LINE to upper case */
for (i = 0; i < strlen(boot_command_line); i++)
upper_command_line[i] = toupper(boot_command_line[i]);

savesys_ptr = strstr(upper_command_line, "SAVESYS=");

if (!savesys_ptr)
return;

savesys_ptr += 8; /* Point to the beginning of the NSS name */
for (i = 0; i < NSS_NAME_SIZE; i++) {
if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
break;
kernel_nss_name[i] = savesys_ptr[i];
}

stext_pfn = PFN_DOWN(__pa(&_stext));
eshared_pfn = PFN_DOWN(__pa(&_eshared));
end_pfn = PFN_UP(__pa(&_end));
min_size = end_pfn << 2;

hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
kernel_nss_name, stext_pfn - 1, stext_pfn,
eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
min_size = einitrd_pfn << 2;
hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
" EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
}
#endif

snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
" EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
kernel_nss_name, kernel_nss_name);
savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

__cpcmd(defsys_cmd, NULL, 0, &response);

if (response != 0) {
pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
response);
kernel_nss_name[0] = '\0';
return;
}

len = strlen(savesys_cmd);
ASCEBC(savesys_cmd, len);
response = savesys_ipl_nss(savesys_cmd, len);

/* On success: response is equal to the command size,
 * max SAVESYS_CMD_SIZE
 * On error: response contains the numeric portion of cp error message.
 * for SAVESYS it will be >= 263
 * for missing privilege class, it will be 1
 */
if (response > SAVESYS_CMD_SIZE || response == 1) {
pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
response);
kernel_nss_name[0] = '\0';
return;
}

/* re-initialize cputime accounting. */
get_tod_clock_ext(tod_clock_base);
S390_lowcore.last_update_clock = *(__u64 *) &tod_clock_base[1];
S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
S390_lowcore.user_timer = 0;
S390_lowcore.system_timer = 0;
asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

/* re-setup boot command line with new ipl vm parms */
ipl_update_parameters();
setup_boot_command_line();

ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */

/*
 * Clear bss memory
 */
@@ -375,8 +239,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(40))
S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
if (test_facility(50) && test_facility(73))
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
__ctl_set_bit(0, 55);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
@@ -549,10 +415,6 @@ static void __init setup_boot_command_line(void)
append_to_cmdline(append_ipl_scpdata);
}

/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
reset_tod_clock();
@@ -569,7 +431,6 @@ void __init startup_init(void)
setup_arch_string();
ipl_update_parameters();
setup_boot_command_line();
create_kernel_nss();
detect_diag9c();
detect_diag44();
detect_machine_facilities();
@@ -13,6 +13,7 @@
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/ctl_reg.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
@@ -952,15 +953,56 @@ load_fpu_regs:
 */
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
sckc __LC_CLOCK_COMPARATOR # validate comparator
lam %a0,%a15,__LC_AREGS_SAVE_AREA-4095(%r1) # validate acrs
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# validate gprs
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID
jno .Lmcck_panic # control registers invalid -> panic
la %r14,4095
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
ptlb
lg %r11,__LC_MCESAD-4095(%r14) # extended machine check save area
nill %r11,0xfc00 # MCESA_ORIGIN_MASK
TSTMSK __LC_CREGS_SAVE_AREA+16-4095(%r14),CR2_GUARDED_STORAGE
jno 0f
TSTMSK __LC_MCCK_CODE,MCCK_CODE_GS_VALID
jno 0f
.insn rxy,0xe3000000004d,0,__MCESA_GS_SAVE_AREA(%r11) # LGSC
0: l %r14,__LC_FP_CREG_SAVE_AREA-4095(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_FC_VALID
jo 0f
sr %r14,%r14
0: sfpc %r14
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jo 0f
lghi %r14,__LC_FPREGS_SAVE_AREA
ld %f0,0(%r14)
ld %f1,8(%r14)
ld %f2,16(%r14)
ld %f3,24(%r14)
ld %f4,32(%r14)
ld %f5,40(%r14)
ld %f6,48(%r14)
ld %f7,56(%r14)
ld %f8,64(%r14)
ld %f9,72(%r14)
ld %f10,80(%r14)
ld %f11,88(%r14)
ld %f12,96(%r14)
ld %f13,104(%r14)
ld %f14,112(%r14)
ld %f15,120(%r14)
j 1f
0: VLM %v0,%v15,0,%r11
VLM %v16,%v31,256,%r11
1: lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
@@ -976,9 +1018,13 @@ ENTRY(mcck_int_handler)
la %r14,__LC_LAST_UPDATE_TIMER
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
jno .Lmcck_panic # no -> skip cleanup critical
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
3: TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
jno .Lmcck_panic
tmhh %r8,0x0001 # interrupting from user ?
jnz 4f
TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
jno .Lmcck_panic
4: SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
@@ -78,6 +78,7 @@ long sys_s390_runtime_instr(int command, int signum);
long sys_s390_guarded_storage(int command, struct gs_cb __user *);
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);

DECLARE_PER_CPU(u64, mt_cycles[8]);
@@ -12,11 +12,10 @@
#include <asm/guarded_storage.h>
#include "entry.h"

void exit_thread_gs(void)
void guarded_storage_release(struct task_struct *tsk)
{
kfree(current->thread.gs_cb);
kfree(current->thread.gs_bc_cb);
current->thread.gs_cb = current->thread.gs_bc_cb = NULL;
kfree(tsk->thread.gs_cb);
kfree(tsk->thread.gs_bc_cb);
}

static int gs_enable(void)
@@ -279,8 +279,6 @@ static __init enum ipl_type get_ipl_type(void)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;

if (ipl_flags & IPL_NSS_VALID)
return IPL_TYPE_NSS;
if (!(ipl_flags & IPL_DEVNO_VALID))
return IPL_TYPE_UNKNOWN;
if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -533,22 +531,6 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
.attrs = ipl_ccw_attrs_lpar
};

/* NSS ipl device attributes */

DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);

static struct attribute *ipl_nss_attrs[] = {
&sys_ipl_type_attr.attr,
&sys_ipl_nss_name_attr.attr,
&sys_ipl_ccw_loadparm_attr.attr,
&sys_ipl_vm_parm_attr.attr,
NULL,
};

static struct attribute_group ipl_nss_attr_group = {
.attrs = ipl_nss_attrs,
};

/* UNKNOWN ipl device attributes */

static struct attribute *ipl_unknown_attrs[] = {
@@ -598,9 +580,6 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP_DUMP:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_fcp_attr_group);
break;
case IPL_TYPE_NSS:
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nss_attr_group);
break;
default:
rc = sysfs_create_group(&ipl_kset->kobj,
&ipl_unknown_attr_group);
@@ -1172,18 +1151,6 @@ static int __init reipl_nss_init(void)
return rc;

reipl_block_ccw_init(reipl_block_nss);
if (ipl_info.type == IPL_TYPE_NSS) {
memset(reipl_block_nss->ipl_info.ccw.nss_name,
' ', NSS_NAME_SIZE);
memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
kernel_nss_name, strlen(kernel_nss_name));
ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
reipl_block_nss->ipl_info.ccw.vm_flags |=
DIAG308_VM_FLAGS_NSS_VALID;

reipl_block_ccw_fill_parms(reipl_block_nss);
}

reipl_capabilities |= IPL_TYPE_NSS;
return 0;
}
@@ -1971,9 +1938,6 @@ void __init setup_ipl(void)
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
case IPL_TYPE_NSS:
strncpy(ipl_info.data.nss.name, kernel_nss_name,
sizeof(ipl_info.data.nss.name));
break;
case IPL_TYPE_UNKNOWN:
/* We have no info to copy */
break;
@@ -161,8 +161,6 @@ struct swap_insn_args {

static int swap_instruction(void *data)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
struct swap_insn_args *args = data;
struct ftrace_insn new_insn, *insn;
struct kprobe *p = args->p;
@@ -185,9 +183,7 @@ static int swap_instruction(void *data)
ftrace_generate_nop_insn(&new_insn);
}
skip_ftrace:
kcb->kprobe_status = KPROBE_SWAP_INST;
s390_kernel_write(p->addr, &new_insn, len);
kcb->kprobe_status = status;
return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
@@ -574,9 +570,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
const struct exception_table_entry *entry;

switch(kcb->kprobe_status) {
case KPROBE_SWAP_INST:
/* We are here because the instruction replacement failed */
return 0;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
@@ -106,7 +106,7 @@ static void __do_machine_kdump(void *image)
static noinline void __machine_kdump(void *image)
{
struct mcesa *mcesa;
unsigned long cr2_old, cr2_new;
union ctlreg2 cr2_old, cr2_new;
int this_cpu, cpu;

lgr_info_log();
@@ -123,11 +123,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
__ctl_store(cr2_old, 2, 2);
cr2_new = cr2_old | (1UL << 4);
__ctl_load(cr2_new, 2, 2);
__ctl_store(cr2_old.val, 2, 2);
cr2_new = cr2_old;
cr2_new.gse = 1;
__ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
__ctl_load(cr2_old, 2, 2);
__ctl_load(cr2_old.val, 2, 2);
}
/*
 * To create a good backchain for this CPU in the dump store_status
@@ -145,7 +146,7 @@ static noinline void __machine_kdump(void *image)
/*
 * Check if kdump checksums are valid: We call purgatory with parameter "0"
 */
static int kdump_csum_valid(struct kimage *image)
static bool kdump_csum_valid(struct kimage *image)
{
#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)image->start;
@@ -154,9 +155,9 @@ static int kdump_csum_valid(struct kimage *image)
__arch_local_irq_stnsm(0xfb); /* disable DAT */
rc = start_kdump(0);
__arch_local_irq_stosm(0x04); /* enable DAT */
return rc ? 0 : -EINVAL;
return rc == 0;
#else
return -EINVAL;
return false;
#endif
}

@@ -219,10 +220,6 @@ int machine_kexec_prepare(struct kimage *image)
{
void *reboot_code_buffer;

/* Can't replace kernel image since it is read-only. */
if (ipl_flags & IPL_NSS_VALID)
return -EOPNOTSUPP;

if (image->type == KEXEC_TYPE_CRASH)
return machine_kexec_prepare_kdump();

@@ -269,6 +266,7 @@ static void __do_machine_kexec(void *data)
s390_reset_system();
data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

__arch_local_irq_stnsm(0xfb); /* disable DAT - avoid no-execute */
/* Call the moving routine */
(*data_mover)(&image->head, image->start);
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/moduleloader.h>
|
||||
#include <linux/bug.h>
|
||||
#include <asm/alternative.h>
|
||||
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
@ -429,6 +430,22 @@ int module_finalize(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *sechdrs,
|
||||
struct module *me)
|
||||
{
|
||||
const Elf_Shdr *s;
|
||||
char *secstrings;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ALTERNATIVES)) {
|
||||
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
|
||||
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
|
||||
if (!strcmp(".altinstructions",
|
||||
secstrings + s->sh_name)) {
|
||||
/* patch .altinstructions */
|
||||
void *aseg = (void *)s->sh_addr;
|
||||
|
||||
apply_alternatives(aseg, aseg + s->sh_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
jump_label_apply_nops(me);
|
||||
return 0;
|
||||
}
|
||||
|
@ -12,6 +12,9 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/log2.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched/signal.h>
|
||||
@ -37,13 +40,94 @@ struct mcck_struct {
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
|
||||
static struct kmem_cache *mcesa_cache;
|
||||
static unsigned long mcesa_origin_lc;
|
||||
|
||||
static void s390_handle_damage(void)
|
||||
static inline int nmi_needs_mcesa(void)
|
||||
{
|
||||
smp_send_stop();
|
||||
return MACHINE_HAS_VX || MACHINE_HAS_GS;
|
||||
}
|
||||
|
||||
static inline unsigned long nmi_get_mcesa_size(void)
|
||||
{
|
||||
if (MACHINE_HAS_GS)
|
||||
return MCESA_MAX_SIZE;
|
||||
return MCESA_MIN_SIZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* The initial machine check extended save area for the boot CPU.
|
||||
* It will be replaced by nmi_init() with an allocated structure.
|
||||
* The structure is required for machine check happening early in
|
||||
* the boot process.
|
||||
*/
|
||||
static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
|
||||
|
||||
void __init nmi_alloc_boot_cpu(struct lowcore *lc)
|
||||
{
|
||||
if (!nmi_needs_mcesa())
|
||||
return;
|
||||
lc->mcesad = (unsigned long) &boot_mcesa;
|
||||
if (MACHINE_HAS_GS)
|
||||
lc->mcesad |= ilog2(MCESA_MAX_SIZE);
|
||||
}
|
||||
|
||||
static int __init nmi_init(void)
|
||||
{
|
||||
unsigned long origin, cr0, size;
|
||||
|
||||
if (!nmi_needs_mcesa())
|
||||
return 0;
|
||||
size = nmi_get_mcesa_size();
|
||||
if (size > MCESA_MIN_SIZE)
|
||||
mcesa_origin_lc = ilog2(size);
|
||||
/* create slab cache for the machine-check-extended-save-areas */
|
||||
mcesa_cache = kmem_cache_create("nmi_save_areas", size, size, 0, NULL);
|
||||
if (!mcesa_cache)
|
||||
panic("Couldn't create nmi save area cache");
|
||||
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
|
||||
if (!origin)
|
||||
panic("Couldn't allocate nmi save area");
|
||||
/* The pointer is stored with mcesa_bits ORed in */
|
||||
kmemleak_not_leak((void *) origin);
|
||||
__ctl_store(cr0, 0, 0);
|
||||
__ctl_clear_bit(0, 28); /* disable lowcore protection */
|
||||
/* Replace boot_mcesa on the boot CPU */
|
||||
S390_lowcore.mcesad = origin | mcesa_origin_lc;
|
||||
__ctl_load(cr0, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
early_initcall(nmi_init);
|
||||
|
||||
int nmi_alloc_per_cpu(struct lowcore *lc)
|
||||
{
|
||||
unsigned long origin;
|
||||
|
||||
if (!nmi_needs_mcesa())
|
||||
return 0;
|
||||
origin = (unsigned long) kmem_cache_alloc(mcesa_cache, GFP_KERNEL);
|
||||
if (!origin)
|
||||
return -ENOMEM;
|
||||
/* The pointer is stored with mcesa_bits ORed in */
|
||||
kmemleak_not_leak((void *) origin);
|
||||
lc->mcesad = origin | mcesa_origin_lc;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void nmi_free_per_cpu(struct lowcore *lc)
|
||||
{
|
||||
if (!nmi_needs_mcesa())
|
||||
return;
|
||||
kmem_cache_free(mcesa_cache, (void *)(lc->mcesad & MCESA_ORIGIN_MASK));
|
||||
}
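/*
 * Sketch of the pointer packing used for lc->mcesad above: because the save
 * area is allocated size-aligned (at least 1 KiB), the low bits of the
 * origin are always zero and can carry the length code (ilog2 of the area
 * size).  The mask values below mirror that assumption for illustration and
 * are not copied from the kernel headers.
 */
#include <stdint.h>

#define SKETCH_MCESA_MIN_SIZE	1024UL
#define SKETCH_ORIGIN_MASK	(~(SKETCH_MCESA_MIN_SIZE - 1))
#define SKETCH_LC_MASK		(SKETCH_MCESA_MIN_SIZE - 1)

static uint64_t mcesad_pack(uint64_t origin, uint64_t lc_bits)
{
	/* origin is size-aligned, so the low bits are free for the length code */
	return origin | lc_bits;
}

static uint64_t mcesad_origin(uint64_t mcesad)
{
	return mcesad & SKETCH_ORIGIN_MASK;	/* strip the length code again */
}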

static notrace void s390_handle_damage(void)
{
	smp_emergency_stop();
	disabled_wait((unsigned long) __builtin_return_address(0));
	while (1);
}
NOKPROBE_SYMBOL(s390_handle_damage);

/*
 * Main machine check handler function. Will be called with interrupts enabled
@@ -100,18 +184,16 @@ void s390_handle_mcck(void)
EXPORT_SYMBOL_GPL(s390_handle_mcck);

/*
 * returns 0 if all registers could be validated
 * returns 0 if all required registers are available
 * returns 1 otherwise
 */
static int notrace s390_validate_registers(union mci mci, int umode)
static int notrace s390_check_registers(union mci mci, int umode)
{
	union ctlreg2 cr2;
	int kill_task;
	u64 zero;
	void *fpt_save_area;
	struct mcesa *mcesa;

	kill_task = 0;
	zero = 0;

	if (!mci.gr) {
		/*
@@ -122,18 +204,13 @@ static int notrace s390_validate_registers(union mci mci, int umode)
		s390_handle_damage();
		kill_task = 1;
	}
	/* Validate control registers */
	/* Check control registers */
	if (!mci.cr) {
		/*
		 * Control registers have unknown contents.
		 * Can't recover and therefore stopping machine.
		 */
		s390_handle_damage();
	} else {
		asm volatile(
			"	lctlg	0,15,0(%0)\n"
			"	ptlb\n"
			: : "a" (&S390_lowcore.cregs_save_area) : "memory");
	}
	if (!mci.fp) {
		/*
@@ -141,7 +218,6 @@ static int notrace s390_validate_registers(union mci mci, int umode)
		 * kernel currently uses floating point registers the
		 * system is stopped. If the process has its floating
		 * pointer registers loaded it is terminated.
		 * Otherwise just revalidate the registers.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_VXR_V0V7)
			s390_handle_damage();
@@ -155,72 +231,29 @@ static int notrace s390_validate_registers(union mci mci, int umode)
		 * If the kernel currently uses the floating pointer
		 * registers and needs the FPC register the system is
		 * stopped. If the process has its floating pointer
		 * registers loaded it is terminated. Otherwiese the
		 * FPC is just revalidated.
		 * registers loaded it is terminated.
		 */
		if (S390_lowcore.fpu_flags & KERNEL_FPC)
			s390_handle_damage();
		asm volatile("lfpc %0" : : "Q" (zero));
		if (!test_cpu_flag(CIF_FPU))
			kill_task = 1;
	} else {
		asm volatile("lfpc %0"
			     : : "Q" (S390_lowcore.fpt_creg_save_area));
	}

	mcesa = (struct mcesa *)(S390_lowcore.mcesad & MCESA_ORIGIN_MASK);
	if (!MACHINE_HAS_VX) {
		/* Validate floating point registers */
		asm volatile(
			"	ld	0,0(%0)\n"
			"	ld	1,8(%0)\n"
			"	ld	2,16(%0)\n"
			"	ld	3,24(%0)\n"
			"	ld	4,32(%0)\n"
			"	ld	5,40(%0)\n"
			"	ld	6,48(%0)\n"
			"	ld	7,56(%0)\n"
			"	ld	8,64(%0)\n"
			"	ld	9,72(%0)\n"
			"	ld	10,80(%0)\n"
			"	ld	11,88(%0)\n"
			"	ld	12,96(%0)\n"
			"	ld	13,104(%0)\n"
			"	ld	14,112(%0)\n"
			"	ld	15,120(%0)\n"
			: : "a" (fpt_save_area) : "memory");
	} else {
		/* Validate vector registers */
		union ctlreg0 cr0;

	if (MACHINE_HAS_VX) {
		if (!mci.vr) {
			/*
			 * Vector registers can't be restored. If the kernel
			 * currently uses vector registers the system is
			 * stopped. If the process has its vector registers
			 * loaded it is terminated. Otherwise just revalidate
			 * the registers.
			 * loaded it is terminated.
			 */
			if (S390_lowcore.fpu_flags & KERNEL_VXR)
				s390_handle_damage();
			if (!test_cpu_flag(CIF_FPU))
				kill_task = 1;
		}
		cr0.val = S390_lowcore.cregs_save_area[0];
		cr0.afp = cr0.vx = 1;
		__ctl_load(cr0.val, 0, 0);
		asm volatile(
			"	la	1,%0\n"
			"	.word	0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
			"	.word	0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
			: : "Q" (*(struct vx_array *) mcesa->vector_save_area)
			: "1");
		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
	}
	/* Validate access registers */
	asm volatile(
		"	lam	0,15,0(%0)"
		: : "a" (&S390_lowcore.access_regs_save_area));
	/* Check if access registers are valid */
	if (!mci.ar) {
		/*
		 * Access registers have unknown contents.
@@ -228,53 +261,41 @@ static int notrace s390_validate_registers(union mci mci, int umode)
		 */
		kill_task = 1;
	}
	/* Validate guarded storage registers */
	if (MACHINE_HAS_GS && (S390_lowcore.cregs_save_area[2] & (1UL << 4))) {
		if (!mci.gs)
	/* Check guarded storage registers */
	cr2.val = S390_lowcore.cregs_save_area[2];
	if (cr2.gse) {
		if (!mci.gs) {
			/*
			 * Guarded storage register can't be restored and
			 * the current processes uses guarded storage.
			 * It has to be terminated.
			 */
			kill_task = 1;
		else
			load_gs_cb((struct gs_cb *)
				   mcesa->guarded_storage_save_area);
		}
	}
	/*
	 * We don't even try to validate the TOD register, since we simply
	 * can't write something sensible into that register.
	 */
	/*
	 * See if we can validate the TOD programmable register with its
	 * old contents (should be zero) otherwise set it to zero.
	 */
	if (!mci.pr)
		asm volatile(
			"	sr	0,0\n"
			"	sckpf"
			: : : "0", "cc");
	else
		asm volatile(
			"	l	0,%0\n"
			"	sckpf"
			: : "Q" (S390_lowcore.tod_progreg_save_area)
			: "0", "cc");
	/* Validate clock comparator register */
	set_clock_comparator(S390_lowcore.clock_comparator);
	/* Check if old PSW is valid */
	if (!mci.wp)
	if (!mci.wp) {
		/*
		 * Can't tell if we come from user or kernel mode
		 * -> stopping machine.
		 */
		s390_handle_damage();
	}
	/* Check for invalid kernel instruction address */
	if (!mci.ia && !umode) {
		/*
		 * The instruction address got lost while running
		 * in the kernel -> stopping machine.
		 */
		s390_handle_damage();
	}

	if (!mci.ms || !mci.pm || !mci.ia)
		kill_task = 1;

	return kill_task;
}
NOKPROBE_SYMBOL(s390_check_registers);

/*
 * Backup the guest's machine check info to its description block
@@ -300,6 +321,7 @@ static void notrace s390_backup_mcck_info(struct pt_regs *regs)
	mcck_backup->failing_storage_address
			= S390_lowcore.failing_storage_address;
}
NOKPROBE_SYMBOL(s390_backup_mcck_info);

#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */
@@ -372,7 +394,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
			s390_handle_damage();
		}
	}
	if (s390_validate_registers(mci, user_mode(regs))) {
	if (s390_check_registers(mci, user_mode(regs))) {
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
@@ -443,6 +465,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
	clear_cpu_flag(CIF_MCCK_GUEST);
	nmi_exit();
}
NOKPROBE_SYMBOL(s390_do_machine_check);

static int __init machine_check_init(void)
{

@@ -10,34 +10,42 @@

/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */

CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);
CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001);
CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002);
CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040);
CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041);
CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042);
CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043);
CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044);
CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045);
CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046);
CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047);
CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048);
CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049);
CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a);
CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b);
CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c);
CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d);
CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e);
CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_fvn1, CPU_CYCLES, 0x0000);
CPUMF_EVENT_ATTR(cf_fvn1, INSTRUCTIONS, 0x0001);
CPUMF_EVENT_ATTR(cf_fvn1, L1I_DIR_WRITES, 0x0002);
CPUMF_EVENT_ATTR(cf_fvn1, L1I_PENALTY_CYCLES, 0x0003);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
CPUMF_EVENT_ATTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
CPUMF_EVENT_ATTR(cf_fvn1, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn1, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf_fvn3, CPU_CYCLES, 0x0000);
CPUMF_EVENT_ATTR(cf_fvn3, INSTRUCTIONS, 0x0001);
CPUMF_EVENT_ATTR(cf_fvn3, L1I_DIR_WRITES, 0x0002);
CPUMF_EVENT_ATTR(cf_fvn3, L1I_PENALTY_CYCLES, 0x0003);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES, 0x0020);
CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
@@ -171,36 +179,105 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_GPAGE_WRITES, 0x0084);
CPUMF_EVENT_ATTR(cf_z14, L1D_L2D_SOURCED_WRITES, 0x0085);
CPUMF_EVENT_ATTR(cf_z14, ITLB2_WRITES, 0x0086);
CPUMF_EVENT_ATTR(cf_z14, ITLB2_MISSES, 0x0087);
CPUMF_EVENT_ATTR(cf_z14, L1I_L2I_SOURCED_WRITES, 0x0088);
CPUMF_EVENT_ATTR(cf_z14, TLB2_PTE_WRITES, 0x0089);
CPUMF_EVENT_ATTR(cf_z14, TLB2_CRSTE_WRITES, 0x008a);
CPUMF_EVENT_ATTR(cf_z14, TLB2_ENGINES_BUSY, 0x008b);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TEND, 0x008c);
CPUMF_EVENT_ATTR(cf_z14, TX_NC_TEND, 0x008d);
CPUMF_EVENT_ATTR(cf_z14, L1C_TLB2_MISSES, 0x008f);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES, 0x0091);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0092);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES, 0x0093);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x0094);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x0095);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES, 0x0096);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x0097);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x0098);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES, 0x0099);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x009a);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x009b);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES, 0x009c);
CPUMF_EVENT_ATTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES, 0x009d);
CPUMF_EVENT_ATTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO, 0x009e);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES, 0x00a2);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES, 0x00a3);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x00a4);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES, 0x00a5);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES, 0x00a6);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV, 0x00a7);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES, 0x00a8);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES, 0x00a9);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV, 0x00aa);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES, 0x00ab);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES, 0x00ac);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV, 0x00ad);
CPUMF_EVENT_ATTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES, 0x00ae);
CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9);
CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);

static struct attribute *cpumcf_pmu_event_attr[] __initdata = {
	CPUMF_EVENT_PTR(cf, CPU_CYCLES),
	CPUMF_EVENT_PTR(cf, INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES),
	CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES),
	CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES),
	CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, PRNG_CYCLES),
	CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, SHA_CYCLES),
	CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, DEA_CYCLES),
	CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf, AES_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, AES_CYCLES),
	CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES),
static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
	CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn1, INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf_fvn1, L1I_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn1, L1I_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_CPU_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1I_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn1, PROBLEM_STATE_L1D_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn1, L1D_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn1, L1D_PENALTY_CYCLES),
	NULL,
};

static struct attribute *cpumcf_fvn3_pmu_event_attr[] __initdata = {
	CPUMF_EVENT_PTR(cf_fvn3, CPU_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn3, INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf_fvn3, L1I_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn3, L1I_PENALTY_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_CPU_CYCLES),
	CPUMF_EVENT_PTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf_fvn3, L1D_DIR_WRITES),
	CPUMF_EVENT_PTR(cf_fvn3, L1D_PENALTY_CYCLES),
	NULL,
};

static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
	CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
	CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
	CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
	NULL,
};

@@ -353,6 +430,63 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
	NULL,
};

static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
	CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
	CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
	CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
	CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
	CPUMF_EVENT_PTR(cf_z14, DTLB2_GPAGE_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_L2D_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, ITLB2_WRITES),
	CPUMF_EVENT_PTR(cf_z14, ITLB2_MISSES),
	CPUMF_EVENT_PTR(cf_z14, L1I_L2I_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, TLB2_PTE_WRITES),
	CPUMF_EVENT_PTR(cf_z14, TLB2_CRSTE_WRITES),
	CPUMF_EVENT_PTR(cf_z14, TLB2_ENGINES_BUSY),
	CPUMF_EVENT_PTR(cf_z14, TX_C_TEND),
	CPUMF_EVENT_PTR(cf_z14, TX_NC_TEND),
	CPUMF_EVENT_PTR(cf_z14, L1C_TLB2_MISSES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCLUSTER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONDRAWER_L4_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_OFFDRAWER_L4_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1D_ONCHIP_L3_SOURCED_WRITES_RO),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONCLUSTER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_MEMORY_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L3_SOURCED_WRITES_IV),
	CPUMF_EVENT_PTR(cf_z14, L1I_ONDRAWER_L4_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES),
	CPUMF_EVENT_PTR(cf_z14, BCD_DFP_EXECUTION_SLOTS),
	CPUMF_EVENT_PTR(cf_z14, VX_BCD_EXECUTION_SLOTS),
	CPUMF_EVENT_PTR(cf_z14, DECIMAL_INSTRUCTIONS),
	CPUMF_EVENT_PTR(cf_z14, LAST_HOST_TRANSLATIONS),
	CPUMF_EVENT_PTR(cf_z14, TX_NC_TABORT),
	CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_NO_SPECIAL),
	CPUMF_EVENT_PTR(cf_z14, TX_C_TABORT_SPECIAL),
	CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
	CPUMF_EVENT_PTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
	NULL,
};

/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */

static struct attribute_group cpumcf_pmu_events_group = {
@@ -379,7 +513,8 @@ static const struct attribute_group *cpumcf_pmu_attr_groups[] = {

static __init struct attribute **merge_attr(struct attribute **a,
					    struct attribute **b)
					    struct attribute **b,
					    struct attribute **c)
{
	struct attribute **new;
	int j, i;
@@ -388,6 +523,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
		;
	for (i = 0; b[i]; i++)
		j++;
	for (i = 0; c[i]; i++)
		j++;
	j++;

	new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
@@ -398,6 +535,8 @@ static __init struct attribute **merge_attr(struct attribute **a,
		new[j++] = a[i];
	for (i = 0; b[i]; i++)
		new[j++] = b[i];
	for (i = 0; c[i]; i++)
		new[j++] = c[i];
	new[j] = NULL;

	return new;
@@ -405,10 +544,26 @@ static __init struct attribute **merge_attr(struct attribute **a,

__init const struct attribute_group **cpumf_cf_event_group(void)
{
	struct attribute **combined, **model;
	struct attribute **combined, **model, **cfvn, **csvn;
	struct attribute *none[] = { NULL };
	struct cpumf_ctr_info ci;
	struct cpuid cpu_id;

	/* Determine generic counters set(s) */
	qctri(&ci);
	switch (ci.cfvn) {
	case 1:
		cfvn = cpumcf_fvn1_pmu_event_attr;
		break;
	case 3:
		cfvn = cpumcf_fvn3_pmu_event_attr;
		break;
	default:
		cfvn = none;
	}
	csvn = cpumcf_svn_generic_pmu_event_attr;

	/* Determine model-specific counter set(s) */
	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x2097:
@@ -427,12 +582,15 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
	case 0x2965:
		model = cpumcf_z13_pmu_event_attr;
		break;
	case 0x3906:
		model = cpumcf_z14_pmu_event_attr;
		break;
	default:
		model = none;
		break;
	}

	combined = merge_attr(cpumcf_pmu_event_attr, model);
	combined = merge_attr(cfvn, csvn, model);
	if (combined)
		cpumcf_pmu_events_group.attrs = combined;
	return cpumcf_pmu_attr_groups;

@@ -823,12 +823,8 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
	}

	/* Check online status of the CPU to which the event is pinned */
	if (event->cpu >= 0) {
		if ((unsigned int)event->cpu >= nr_cpumask_bits)
	if (event->cpu >= 0 && !cpu_online(event->cpu))
		return -ENODEV;
		if (!cpu_online(event->cpu))
			return -ENODEV;
	}

	/* Force reset of idle/hv excludes regardless of what the
	 * user requested.

@@ -44,27 +44,14 @@ asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	if (tsk == current) {
		exit_thread_runtime_instr();
		exit_thread_gs();
	}
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	runtime_instr_release(tsk);
	guarded_storage_release(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -100,6 +87,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	p->thread.per_flags = 0;
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.guest_timer = 0;

@@ -31,6 +31,9 @@
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/facility.h>

#include "entry.h"

#ifdef CONFIG_COMPAT
@@ -45,42 +48,42 @@ void update_cr_regs(struct task_struct *task)
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	struct per_regs old, new;
	unsigned long cr0_old, cr0_new;
	unsigned long cr2_old, cr2_new;
	union ctlreg0 cr0_old, cr0_new;
	union ctlreg2 cr2_old, cr2_new;
	int cr0_changed, cr2_changed;

	__ctl_store(cr0_old, 0, 0);
	__ctl_store(cr2_old, 2, 2);
	__ctl_store(cr0_old.val, 0, 0);
	__ctl_store(cr2_old.val, 2, 2);
	cr0_new = cr0_old;
	cr2_new = cr2_old;
	/* Take care of the enable/disable of transactional execution. */
	if (MACHINE_HAS_TE) {
		/* Set or clear transaction execution TXC bit 8. */
		cr0_new |= (1UL << 55);
		cr0_new.tcx = 1;
		if (task->thread.per_flags & PER_FLAG_NO_TE)
			cr0_new &= ~(1UL << 55);
			cr0_new.tcx = 0;
		/* Set or clear transaction execution TDC bits 62 and 63. */
		cr2_new &= ~3UL;
		cr2_new.tdc = 0;
		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
				cr2_new |= 1UL;
				cr2_new.tdc = 1;
			else
				cr2_new |= 2UL;
				cr2_new.tdc = 2;
		}
	}
	/* Take care of enable/disable of guarded storage. */
	if (MACHINE_HAS_GS) {
		cr2_new &= ~(1UL << 4);
		cr2_new.gse = 0;
		if (task->thread.gs_cb)
			cr2_new |= (1UL << 4);
			cr2_new.gse = 1;
	}
	/* Load control register 0/2 iff changed */
	cr0_changed = cr0_new != cr0_old;
	cr2_changed = cr2_new != cr2_old;
	cr0_changed = cr0_new.val != cr0_old.val;
	cr2_changed = cr2_new.val != cr2_old.val;
	if (cr0_changed)
		__ctl_load(cr0_new, 0, 0);
		__ctl_load(cr0_new.val, 0, 0);
	if (cr2_changed)
		__ctl_load(cr2_new, 2, 2);
		__ctl_load(cr2_new.val, 2, 2);
	/* Copy user specified PER registers */
	new.control = thread->per_user.control;
	new.start = thread->per_user.start;
@@ -1172,26 +1175,37 @@ static int s390_gs_cb_set(struct task_struct *target,
			  unsigned int pos, unsigned int count,
			  const void *kbuf, const void __user *ubuf)
{
	struct gs_cb *data = target->thread.gs_cb;
	struct gs_cb gs_cb = { }, *data = NULL;
	int rc;

	if (!MACHINE_HAS_GS)
		return -ENODEV;
	if (!data) {
	if (!target->thread.gs_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
		data->gsd = 25;
		target->thread.gs_cb = data;
		if (target == current)
			__ctl_set_bit(2, 4);
	} else if (target == current) {
		save_gs_cb(data);
	}
	if (!target->thread.gs_cb)
		gs_cb.gsd = 25;
	else if (target == current)
		save_gs_cb(&gs_cb);
	else
		gs_cb = *target->thread.gs_cb;
	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				data, 0, sizeof(struct gs_cb));
	if (target == current)
		restore_gs_cb(data);
				&gs_cb, 0, sizeof(gs_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}
	preempt_disable();
	if (!target->thread.gs_cb)
		target->thread.gs_cb = data;
	*target->thread.gs_cb = gs_cb;
	if (target == current) {
		__ctl_set_bit(2, 4);
		restore_gs_cb(target->thread.gs_cb);
	}
	preempt_enable();
	return rc;
}

@@ -1229,6 +1243,96 @@ static int s390_gs_bc_set(struct task_struct *target,
				  data, 0, sizeof(struct gs_cb));
}

static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
{
	return (cb->rca & 0x1f) == 0 &&
		(cb->roa & 0xfff) == 0 &&
		(cb->rla & 0xfff) == 0xfff &&
		cb->s == 1 &&
		cb->k == 1 &&
		cb->h == 0 &&
		cb->reserved1 == 0 &&
		cb->ps == 1 &&
		cb->qs == 0 &&
		cb->pc == 1 &&
		cb->qc == 0 &&
		cb->reserved2 == 0 &&
		cb->key == PAGE_DEFAULT_KEY &&
		cb->reserved3 == 0 &&
		cb->reserved4 == 0 &&
		cb->reserved5 == 0 &&
		cb->reserved6 == 0 &&
		cb->reserved7 == 0 &&
		cb->reserved8 == 0 &&
		cb->rla >= cb->roa &&
		cb->rca >= cb->roa &&
		cb->rca <= cb->rla+1 &&
		cb->m < 3;
}

static int s390_runtime_instr_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	struct runtime_instr_cb *data = target->thread.ri_cb;

	if (!test_facility(64))
		return -ENODEV;
	if (!data)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   data, 0, sizeof(struct runtime_instr_cb));
}

static int s390_runtime_instr_set(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  const void *kbuf, const void __user *ubuf)
{
	struct runtime_instr_cb ri_cb = { }, *data = NULL;
	int rc;

	if (!test_facility(64))
		return -ENODEV;

	if (!target->thread.ri_cb) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;
	}

	if (target->thread.ri_cb) {
		if (target == current)
			store_runtime_instr_cb(&ri_cb);
		else
			ri_cb = *target->thread.ri_cb;
	}

	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&ri_cb, 0, sizeof(struct runtime_instr_cb));
	if (rc) {
		kfree(data);
		return -EFAULT;
	}

	if (!is_ri_cb_valid(&ri_cb)) {
		kfree(data);
		return -EINVAL;
	}

	preempt_disable();
	if (!target->thread.ri_cb)
		target->thread.ri_cb = data;
	*target->thread.ri_cb = ri_cb;
	if (target == current)
		load_runtime_instr_cb(target->thread.ri_cb);
	preempt_enable();

	return 0;
}
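/*
 * Usage sketch for the new regset from a tracer, assuming it is exposed as
 * NT_S390_RI_CB via PTRACE_GETREGSET; the note value below is an assumption
 * taken from current toolchain headers, not from this diff, and the control
 * block is read as eight doublewords per the regset layout above.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_S390_RI_CB
#define NT_S390_RI_CB	0x30d	/* assumed note type */
#endif

static long read_ri_cb(pid_t pid, unsigned long long cb[8])
{
	struct iovec iov = { .iov_base = cb, .iov_len = 8 * sizeof(cb[0]) };

	/* fails with ENODEV without facility 64, ENODATA if RI is disabled */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_S390_RI_CB, &iov);
}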

static const struct user_regset s390_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
@@ -1302,6 +1406,14 @@ static const struct user_regset s390_regsets[] = {
		.get = s390_gs_bc_get,
		.set = s390_gs_bc_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_view = {
@@ -1538,6 +1650,14 @@ static const struct user_regset s390_compat_regsets[] = {
		.get = s390_gs_cb_get,
		.set = s390_gs_cb_set,
	},
	{
		.core_note_type = NT_S390_RI_CB,
		.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
		.size = sizeof(__u64),
		.align = sizeof(__u64),
		.get = s390_runtime_instr_get,
		.set = s390_runtime_instr_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {

@@ -29,7 +29,6 @@
ENTRY(relocate_kernel)
	basr	%r13,0			# base address
.base:
	stnsm	sys_msk-.base(%r13),0xfb	# disable DAT
	stctg	%c0,%c15,ctlregs-.base(%r13)
	stmg	%r0,%r15,gprregs-.base(%r13)
	lghi	%r0,3
@@ -103,8 +102,6 @@ ENTRY(relocate_kernel)
	.align	8
load_psw:
	.long	0x00080000,0x80000000
sys_msk:
	.quad	0
ctlregs:
	.rept	16
	.quad	0

@@ -21,11 +21,24 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;

void runtime_instr_release(struct task_struct *tsk)
{
	kfree(tsk->thread.ri_cb);
}

static void disable_runtime_instr(void)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *task = current;
	struct pt_regs *regs;

	if (!task->thread.ri_cb)
		return;
	regs = task_pt_regs(task);
	preempt_disable();
	load_runtime_instr_cb(&runtime_instr_empty_cb);
	kfree(task->thread.ri_cb);
	task->thread.ri_cb = NULL;
	preempt_enable();

	/*
	 * Make sure the RI bit is deleted from the PSW. If the user did not
@@ -37,24 +50,13 @@ static void disable_runtime_instr(void)

static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
	cb->buf_limit = 0xfff;
	cb->pstate = 1;
	cb->pstate_set_buf = 1;
	cb->pstate_sample = 1;
	cb->pstate_collect = 1;
	cb->rla = 0xfff;
	cb->s = 1;
	cb->k = 1;
	cb->ps = 1;
	cb->pc = 1;
	cb->key = PAGE_DEFAULT_KEY;
	cb->valid = 1;
}

void exit_thread_runtime_instr(void)
{
	struct task_struct *task = current;

	if (!task->thread.ri_cb)
		return;
	disable_runtime_instr();
	kfree(task->thread.ri_cb);
	task->thread.ri_cb = NULL;
	cb->v = 1;
}

SYSCALL_DEFINE1(s390_runtime_instr, int, command)
@@ -65,9 +67,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
		return -EOPNOTSUPP;

	if (command == S390_RUNTIME_INSTR_STOP) {
		preempt_disable();
		exit_thread_runtime_instr();
		preempt_enable();
		disable_runtime_instr();
		return 0;
	}

@@ -55,17 +55,18 @@
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include "entry.h"

/*
@@ -339,16 +340,8 @@ static void __init setup_lowcore(void)
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		unsigned long bits, size;

		bits = MACHINE_HAS_GS ? 11 : 10;
		size = 1UL << bits;
		lc->mcesad = (__u64) memblock_virt_alloc(size, size);
		if (MACHINE_HAS_GS)
			lc->mcesad |= bits;
	}
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
	nmi_alloc_boot_cpu(lc);
	vdso_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
@@ -380,6 +373,8 @@ static void __init setup_lowcore(void)

#ifdef CONFIG_SMP
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
#endif

	set_prefix((u32)(unsigned long) lc);
@@ -764,7 +759,7 @@ static int __init setup_hwcaps(void)
	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (test_facility(50) && test_facility(73))
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_S390_TE;

	/*
@@ -955,6 +950,8 @@ void __init setup_arch(char **cmdline_p)
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();

	/* Setup zfcpdump support */
	setup_zfcpdump();

@@ -37,6 +37,7 @@
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
@@ -81,8 +82,6 @@ struct pcpu {
static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

static struct kmem_cache *pcpu_mcesa_cache;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

@@ -193,10 +192,8 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	unsigned long mcesa_origin, mcesa_bits;
	struct lowcore *lc;

	mcesa_origin = mcesa_bits = 0;
	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
@@ -204,39 +201,30 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
		if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
			mcesa_origin = (unsigned long)
				kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
			if (!mcesa_origin)
				goto out;
			/* The pointer is stored with mcesa_bits ORed in */
			kmemleak_not_leak((void *) mcesa_origin);
			mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
		}
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->mcesad = mcesa_origin | mcesa_bits;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (vdso_alloc_per_cpu(lc))
	lc->spinlock_index = 0;
	if (nmi_alloc_per_cpu(lc))
		goto out;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out:
	if (pcpu != &pcpu_devices[0]) {
		if (mcesa_origin)
			kmem_cache_free(pcpu_mcesa_cache,
					(void *) mcesa_origin);
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -248,17 +236,12 @@ out:

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long mcesa_origin;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
	}
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
@@ -274,6 +257,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
@@ -282,6 +266,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -423,13 +408,17 @@ void smp_yield_cpu(int cpu)
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
@@ -438,21 +427,21 @@ static void smp_emergency_stop(cpumask_t *cpumask)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(cpumask))
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);
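/*
 * Aside on the timeout computed above: the s390 TOD clock advances 4096
 * units per microsecond (bit 51 increments each microsecond), so
 * (1000000UL << 12) is a one-second deadline.  A helper expressing that
 * conversion, as an illustrative sketch:
 */
static inline unsigned long tod_from_us(unsigned long us)
{
	return us << 12;	/* 4096 TOD clock units per microsecond */
}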

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
@@ -460,17 +449,16 @@ void smp_send_stop(void)
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);
		smp_emergency_stop();

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}
@@ -804,6 +792,8 @@ void __init smp_detect_cpus(void)
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
@@ -817,8 +807,12 @@ static void smp_start_secondary(void *cpuvoid)
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -927,22 +921,12 @@ void __init smp_fill_possible_mask(void)

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long size;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	/* create slab cache for the machine-check-extended-save-areas */
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
		pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
						     size, size, 0, NULL);
		if (!pcpu_mcesa_cache)
			panic("Couldn't create nmi save area cache");
	}
}

void __init smp_prepare_boot_cpu(void)
@@ -965,6 +949,7 @@ void __init smp_setup_processor_id(void)
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*

@@ -8,22 +8,19 @@
 * Copyright IBM Corp. 2016
 * Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/ratelimit.h>

#include <asm/kvm_host.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <asm/asm-offsets.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/sysinfo.h>
#include <asm/ebcdic.h>

#include "kvm-s390.h"
#include "gaccess.h"
#include "trace.h"
#include <asm/facility.h>
#include <asm/sthyi.h>
#include "entry.h"

#define DED_WEIGHT 0xffff
/*
@@ -144,6 +141,21 @@ struct lpar_cpu_inf {
	struct cpu_inf ifl;
};

/*
 * STHYI requires extensive locking in the higher hypervisors
 * and is very computational/memory expensive. Therefore we
 * cache the retrieved data whose valid period is 1s.
 */
#define CACHE_VALID_JIFFIES	HZ

struct sthyi_info {
	void *info;
	unsigned long end;
};

static DEFINE_MUTEX(sthyi_mutex);
static struct sthyi_info sthyi_cache;

static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
	return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
@@ -382,88 +394,124 @@ out:
	vfree(diag204_buf);
}

static int sthyi(u64 vaddr)
static int sthyi(u64 vaddr, u64 *rc)
{
	register u64 code asm("0") = 0;
	register u64 addr asm("2") = vaddr;
	register u64 rcode asm("3");
	int cc;

	asm volatile(
		".insn   rre,0xB2560000,%[code],%[addr]\n"
		"ipm     %[cc]\n"
		"srl     %[cc],28\n"
		: [cc] "=d" (cc)
		: [cc] "=d" (cc), "=d" (rcode)
		: [code] "d" (code), [addr] "a" (addr)
		: "3", "memory", "cc");
		: "memory", "cc");
	*rc = rcode;
	return cc;
}

int handle_sthyi(struct kvm_vcpu *vcpu)
static int fill_dst(void *dst, u64 *rc)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
	struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;

	/*
	 * STHYI requires extensive locking in the higher hypervisors
	 * and is very computational/memory expensive. Therefore we
	 * ratelimit the executions per VM.
	 * If the facility is on, we don't want to emulate the instruction.
	 * We ask the hypervisor to provide the data.
	 */
	if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
		kvm_s390_retry_instr(vcpu);
		return 0;
	}

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		goto out;
	}

	if (addr & ~PAGE_MASK)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	/*
	 * If we are a guest, we don't want to emulate an emulated
	 * instruction. We ask the hypervisor to provide the data.
	 */
	if (test_facility(74)) {
		cc = sthyi((u64)sctns);
		goto out;
	}
	if (test_facility(74))
		return sthyi((u64)dst, rc);

	fill_hdr(sctns);
	fill_stsi(sctns);
	fill_diag(sctns);
	*rc = 0;
	return 0;
}

out:
	if (!cc) {
		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
		if (r) {
			free_page((unsigned long)sctns);
			return kvm_s390_inject_prog_cond(vcpu, r);
		}
	}
static int sthyi_init_cache(void)
{
	if (sthyi_cache.info)
		return 0;
	sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sthyi_cache.info)
		return -ENOMEM;
	sthyi_cache.end = jiffies - 1; /* expired */
	return 0;
}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = cc ? 4 : 0;
	kvm_s390_set_psw_cc(vcpu, cc);
static int sthyi_update_cache(u64 *rc)
{
	int r;

	memset(sthyi_cache.info, 0, PAGE_SIZE);
	r = fill_dst(sthyi_cache.info, rc);
	if (r)
		return r;
	sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
	return r;
}

/*
 * sthyi_fill - Fill page with data returned by the STHYI instruction
 *
 * @dst: Pointer to zeroed page
 * @rc:  Pointer for storing the return code of the instruction
 *
 * Fills the destination with system information returned by the STHYI
 * instruction. The data is generated by emulation or execution of STHYI,
 * if available. The return value is the condition code that would be
 * returned, the rc parameter is the return code which is passed in
 * register R2 + 1.
 */
int sthyi_fill(void *dst, u64 *rc)
{
	int r;

	mutex_lock(&sthyi_mutex);
	r = sthyi_init_cache();
	if (r)
		goto out;

	if (time_is_before_jiffies(sthyi_cache.end)) {
		/* cache expired */
		r = sthyi_update_cache(rc);
		if (r)
			goto out;
	}
	*rc = 0;
	memcpy(dst, sthyi_cache.info, PAGE_SIZE);
out:
	mutex_unlock(&sthyi_mutex);
	return r;
}
EXPORT_SYMBOL_GPL(sthyi_fill);

SYSCALL_DEFINE4(s390_sthyi, unsigned long, function_code, void __user *, buffer,
		u64 __user *, return_code, unsigned long, flags)
{
	u64 sthyi_rc;
	void *info;
	int r;

	if (flags)
		return -EINVAL;
	if (function_code != STHYI_FC_CP_IFL_CAP)
		return -EOPNOTSUPP;
	info = (void *)get_zeroed_page(GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	r = sthyi_fill(info, &sthyi_rc);
	if (r < 0)
		goto out;
	if (return_code && put_user(sthyi_rc, return_code)) {
		r = -EFAULT;
		goto out;
	}
	if (copy_to_user(buffer, info, PAGE_SIZE))
		r = -EFAULT;
out:
	free_page((unsigned long)info);
	return r;
}
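/*
 * Userspace sketch invoking the new system call.  The syscall number and
 * the function-code name are assumptions: 380 follows from the table entry
 * further below (statx is 379), and STHYI_FC_CP_IFL_CAP (0) mirrors the
 * check in the handler above.
 */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_s390_sthyi
#define __NR_s390_sthyi		380	/* assumed syscall number */
#endif
#define STHYI_FC_CP_IFL_CAP	0	/* assumed function code value */

static long query_sthyi(void *buf4k)
{
	uint64_t rc = 0;

	/* the buffer receives one page of machine/LPAR capacity data */
	return syscall(__NR_s390_sthyi, STHYI_FC_CP_IFL_CAP, buf4k, &rc, 0UL);
}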

@@ -153,7 +153,7 @@ int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
	unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
	unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
	unsigned long end_rodata_pfn = PFN_DOWN(__pa(&__end_rodata)) - 1;
	unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));

	/* Always save lowcore pages (LC protection might be enabled). */
@@ -161,9 +161,9 @@ int pfn_is_nosave(unsigned long pfn)
		return 0;
	if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
		return 1;
	/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
	if (pfn >= stext_pfn && pfn <= eshared_pfn)
		return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
	/* Skip memory holes and read-only pages (DCSS, ...). */
	if (pfn >= stext_pfn && pfn <= end_rodata_pfn)
		return 0;
	if (tprot(PFN_PHYS(pfn)))
		return 1;
	return 0;

@@ -389,3 +389,4 @@ SYSCALL(sys_preadv2,compat_sys_preadv2)
SYSCALL(sys_pwritev2,compat_sys_pwritev2)
SYSCALL(sys_s390_guarded_storage,compat_sys_s390_guarded_storage) /* 378 */
SYSCALL(sys_statx,compat_sys_statx)
SYSCALL(sys_s390_sthyi,compat_sys_s390_sthyi)

@@ -133,6 +133,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
@@ -273,6 +274,14 @@ void store_topology(struct sysinfo_15_1_x *info)
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
@@ -298,6 +307,7 @@ int arch_update_cpu_topology(void)
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -435,9 +445,39 @@ static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}
|
||||
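For reference, the new attribute shows up per CPU under sysfs; a minimal sketch of reading it from user space, assuming the standard cpu device path:

/* Illustrative only; the sysfs path is assumed from the standard
 * cpu device layout, not taken from this commit. */
#include <stdio.h>

int main(void)
{
	int dedicated = 0;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/dedicated", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%d", &dedicated) == 1)
		printf("cpu0 dedicated: %d\n", dedicated);
	fclose(f);
	return 0;
}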

static const struct cpumask *cpu_thread_mask(int cpu)
@@ -509,6 +549,7 @@ void __init topology_init_early(void)
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
@@ -140,6 +140,20 @@ static void __init vdso_init_data(struct vdso_data *vd)
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
@@ -166,10 +180,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up access register mode page table */
	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
@@ -60,12 +60,7 @@ SECTIONS

	RO_DATA_SECTION(PAGE_SIZE)

#ifdef CONFIG_SHARED_KERNEL
	. = ALIGN(0x100000);	/* VM shared segments are 1MB aligned */
#endif

	. = ALIGN(PAGE_SIZE);
	_eshared = .;		/* End of shareable data */
	_sdata = .;		/* Start of data section */

	. = ALIGN(PAGE_SIZE);
@@ -105,6 +100,29 @@ SECTIONS
		EXIT_DATA
	}

	/*
	 * struct alt_inst entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 * Note that it is part of the __init region.
	 */
	. = ALIGN(8);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}

	/*
	 * And here are the replacement instructions. The linker emits
	 * them as binary blobs. The .altinstructions section has enough
	 * data to get the address and the length of them to patch the
	 * kernel safely.
	 * Note that it is part of the __init region.
	 */
	.altinstr_replacement : {
		*(.altinstr_replacement)
	}
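A sketch of how the patcher consumes this section: it walks the __alt_instructions..__alt_instructions_end range and overwrites each original site whose facility is installed. The struct alt_instr layout below is assumed for illustration; the real s390 definition lives in alternative.h.

/* Simplified model of CPU-alternatives patching; the field layout is
 * an assumption, not the actual arch/s390 struct alt_instr. */
struct alt_instr {
	s32 instr_offset;	/* original instruction, relative offset */
	s32 repl_offset;	/* replacement instruction, relative offset */
	u16 facility;		/* facility bit the replacement needs */
	u8  instrlen;		/* length of the original instruction */
	u8  replacementlen;	/* length of the replacement */
};

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

static void __init apply_alternatives_model(void)
{
	struct alt_instr *a;

	for (a = __alt_instructions; a < __alt_instructions_end; a++) {
		if (!test_facility(a->facility))
			continue;	/* keep the original instruction */
		/* copy the replacement over the original site and pad
		 * the remainder with nops if it is shorter */
	}
}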

	/* early.c uses stsi, which requires page aligned data. */
	. = ALIGN(PAGE_SIZE);
	INIT_DATA_SECTION(0x100)
@@ -12,6 +12,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm

kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
kvm-objs += diag.o gaccess.o guestdbg.o sthyi.o vsie.o
kvm-objs += diag.o gaccess.o guestdbg.o vsie.o

obj-$(CONFIG_KVM) += kvm.o
@@ -18,6 +18,7 @@
#include <asm/kvm_host.h>
#include <asm/asm-offsets.h>
#include <asm/irq.h>
#include <asm/sysinfo.h>

#include "kvm-s390.h"
#include "gaccess.h"
@@ -360,6 +361,61 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
	return -EOPNOTSUPP;
}

/*
 * Handle the sthyi instruction that provides the guest with system
 * information, like current CPU resources available at each level of
 * the machine.
 */
int handle_sthyi(struct kvm_vcpu *vcpu)
{
	int reg1, reg2, r = 0;
	u64 code, addr, cc = 0, rc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
	code = vcpu->run->s.regs.gprs[reg1];
	addr = vcpu->run->s.regs.gprs[reg2];

	vcpu->stat.instruction_sthyi++;
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {
		cc = 3;
		rc = 4;
		goto out;
	}

	if (addr & ~PAGE_MASK)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	sctns = (void *)get_zeroed_page(GFP_KERNEL);
	if (!sctns)
		return -ENOMEM;

	cc = sthyi_fill(sctns, &rc);

out:
	if (!cc) {
		r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
		if (r) {
			free_page((unsigned long)sctns);
			return kvm_s390_inject_prog_cond(vcpu, r);
		}
	}

	free_page((unsigned long)sctns);
	vcpu->run->s.regs.gprs[reg2 + 1] = rc;
	kvm_s390_set_psw_cc(vcpu, cc);
	return r;
}
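For context, the guest side that lands in this handler issues STHYI with an even register pair; a hedged sketch follows. The 0xb2560000 encoding mirrors the emitter in the kernel's own sthyi code and should be treated as illustrative, not authoritative.

/* Sketch of issuing STHYI from guest code; illustrative only. */
static inline int sthyi_issue(unsigned long addr, unsigned long *rc)
{
	register unsigned long r0 asm("0") = 0;		/* function code */
	register unsigned long r2 asm("2") = addr;	/* 4K-aligned buffer */
	register unsigned long r3 asm("3") = 0;		/* rc lands in R2 + 1 */
	int cc;

	asm volatile(
		"	.insn	rre,0xb2560000,%[code],%[addr]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), "+d" (r3)
		: [code] "d" (r0), [addr] "a" (r2)
		: "cc", "memory");
	*rc = r3;
	return cc;	/* the condition code set via kvm_s390_set_psw_cc() */
}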

static int handle_operexc(struct kvm_vcpu *vcpu)
{
	psw_t oldpsw, newpsw;
@@ -2483,11 +2483,11 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,

	mci.val = mcck_info->mcic;
	if (mci.sr)
		cr14 |= MCCK_CR14_RECOVERY_SUB_MASK;
		cr14 |= CR14_RECOVERY_SUBMASK;
	if (mci.dg)
		cr14 |= MCCK_CR14_DEGRAD_SUB_MASK;
		cr14 |= CR14_DEGRADATION_SUBMASK;
	if (mci.w)
		cr14 |= MCCK_CR14_WARN_SUB_MASK;
		cr14 |= CR14_WARNING_SUBMASK;

	mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
	mchk->cr14 = cr14;
@@ -1884,8 +1884,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
@@ -3283,7 +3281,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->valid &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
@@ -242,6 +242,8 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
@@ -268,9 +270,6 @@ void kvm_s390_vsie_destroy(struct kvm *kvm);
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in sthyi.c */
int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
				const struct kvm_s390_vm_tod_clock *gtod);
@@ -79,21 +79,25 @@ ENTRY(memset)
	ex	%r4,0(%r3)
	br	%r14
.Lmemset_fill:
	stc	%r3,0(%r2)
	cghi	%r4,1
	lgr	%r1,%r2
	ber	%r14
	je	.Lmemset_fill_exit
	aghi	%r4,-2
	srlg	%r3,%r4,8
	ltgr	%r3,%r3
	srlg	%r5,%r4,8
	ltgr	%r5,%r5
	jz	.Lmemset_fill_remainder
.Lmemset_fill_loop:
	mvc	1(256,%r1),0(%r1)
	stc	%r3,0(%r1)
	mvc	1(255,%r1),0(%r1)
	la	%r1,256(%r1)
	brctg	%r3,.Lmemset_fill_loop
	brctg	%r5,.Lmemset_fill_loop
.Lmemset_fill_remainder:
	larl	%r3,.Lmemset_mvc
	ex	%r4,0(%r3)
	stc	%r3,0(%r1)
	larl	%r5,.Lmemset_mvc
	ex	%r4,0(%r5)
	br	%r14
.Lmemset_fill_exit:
	stc	%r3,0(%r1)
	br	%r14
.Lmemset_xc:
	xc	0(1,%r1),0(%r1)
@@ -127,3 +131,47 @@ ENTRY(memcpy)
.Lmemcpy_mvc:
	mvc	0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)

/*
 * __memset16/32/64
 *
 * void *__memset16(uint16_t *s, uint16_t v, size_t count)
 * void *__memset32(uint32_t *s, uint32_t v, size_t count)
 * void *__memset64(uint64_t *s, uint64_t v, size_t count)
 */
.macro __MEMSET bits,bytes,insn
ENTRY(__memset\bits)
	ltgr	%r4,%r4
	bzr	%r14
	cghi	%r4,\bytes
	je	.L__memset_exit\bits
	aghi	%r4,-(\bytes+1)
	srlg	%r5,%r4,8
	ltgr	%r5,%r5
	lgr	%r1,%r2
	jz	.L__memset_remainder\bits
.L__memset_loop\bits:
	\insn	%r3,0(%r1)
	mvc	\bytes(256-\bytes,%r1),0(%r1)
	la	%r1,256(%r1)
	brctg	%r5,.L__memset_loop\bits
.L__memset_remainder\bits:
	\insn	%r3,0(%r1)
	larl	%r5,.L__memset_mvc\bits
	ex	%r4,0(%r5)
	br	%r14
.L__memset_exit\bits:
	\insn	%r3,0(%r2)
	br	%r14
.L__memset_mvc\bits:
	mvc	\bytes(1,%r1),0(%r1)
.endm

__MEMSET 16,2,sth
EXPORT_SYMBOL(__memset16)

__MEMSET 32,4,st
EXPORT_SYMBOL(__memset32)

__MEMSET 64,8,stg
EXPORT_SYMBOL(__memset64)
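The strategy above is: store the first element, then let overlapping MVC copies replicate it through 256-byte blocks. A plain-C model of the same propagation (illustrative, not the kernel implementation):

#include <stddef.h>
#include <stdint.h>

/* Reference model of __memset32: one store, then an overlapping
 * forward byte copy replicates the pattern, which is what MVC does. */
static void *memset32_model(uint32_t *s, uint32_t v, size_t count)
{
	unsigned char *p = (unsigned char *)s;
	size_t total = count * sizeof(v), i;

	if (!count)
		return s;
	s[0] = v;
	for (i = sizeof(v); i < total; i++)
		p[i] = p[i - sizeof(v)];	/* overlapping forward copy */
	return s;
}

The page-table code later in this diff relies on exactly this, e.g. memset64(table, _PAGE_INVALID, PTRS_PER_PTE).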
@@ -9,8 +9,11 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/alternative.h>
#include <asm/io.h>

int spin_retry = -1;
@@ -33,14 +36,46 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);

struct spin_wait {
	struct spin_wait *next, *prev;
	int node_id;
} __aligned(32);

static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]);

#define _Q_LOCK_CPU_OFFSET	0
#define _Q_LOCK_STEAL_OFFSET	16
#define _Q_TAIL_IDX_OFFSET	18
#define _Q_TAIL_CPU_OFFSET	20

#define _Q_LOCK_CPU_MASK	0x0000ffff
#define _Q_LOCK_STEAL_ADD	0x00010000
#define _Q_LOCK_STEAL_MASK	0x00030000
#define _Q_TAIL_IDX_MASK	0x000c0000
#define _Q_TAIL_CPU_MASK	0xfff00000

#define _Q_LOCK_MASK		(_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK)
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)

void arch_spin_lock_setup(int cpu)
{
	struct spin_wait *node;
	int ix;

	node = per_cpu_ptr(&spin_wait[0], cpu);
	for (ix = 0; ix < 4; ix++, node++) {
		memset(node, 0, sizeof(*node));
		node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) +
			(ix << _Q_TAIL_IDX_OFFSET);
	}
}
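A small round-trip illustration of the encoding set up above (it mirrors arch_spin_decode_tail() further down):

/* Illustrative helpers for the tail encoding; not part of the commit. */
static int encode_tail(int cpu, int ix)
{
	return ((cpu + 1) << _Q_TAIL_CPU_OFFSET) + (ix << _Q_TAIL_IDX_OFFSET);
}

static void decode_tail(int lock, int *cpu, int *ix)
{
	*ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	*cpu = ((lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET) - 1;
}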

static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		ALTERNATIVE("", ".long 0xb2fa0040", 49)	/* NIAI 4 */
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
@@ -51,9 +86,7 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		ALTERNATIVE("", ".long 0xb2fa0080", 49)	/* NIAI 8 */
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
@@ -61,76 +94,161 @@ static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
	return expected == old;
}

static inline struct spin_wait *arch_spin_decode_tail(int lock)
{
	int ix, cpu;

	ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
	cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET;
	return per_cpu_ptr(&spin_wait[ix], cpu - 1);
}

static inline int arch_spin_yield_target(int lock, struct spin_wait *node)
{
	if (lock & _Q_LOCK_CPU_MASK)
		return lock & _Q_LOCK_CPU_MASK;
	if (node == NULL || node->prev == NULL)
		return 0;	/* 0 -> no target cpu */
	while (node->prev)
		node = node->prev;
	return node->node_id >> _Q_TAIL_CPU_OFFSET;
}

static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
{
	struct spin_wait *node, *next;
	int lockval, ix, node_id, tail_id, old, new, owner, count;

	ix = S390_lowcore.spinlock_index++;
	barrier();
	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
	node = this_cpu_ptr(&spin_wait[ix]);
	node->prev = node->next = NULL;
	node_id = node->node_id;

	/* Enqueue the node for this CPU in the spinlock wait queue */
	while (1) {
		old = READ_ONCE(lp->lock);
		if ((old & _Q_LOCK_CPU_MASK) == 0 &&
		    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
			/*
			 * The lock is free but there may be waiters.
			 * With no waiters simply take the lock; if there
			 * are waiters try to steal the lock. The lock may
			 * be stolen three times before the next queued
			 * waiter will get the lock.
			 */
			new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				goto out;
			/* lock passing in progress */
			continue;
		}
		/* Make the node of this CPU the new tail. */
		new = node_id | (old & _Q_LOCK_MASK);
		if (__atomic_cmpxchg_bool(&lp->lock, old, new))
			break;
	}
	/* Set the 'next' pointer of the tail node in the queue */
	tail_id = old & _Q_TAIL_MASK;
	if (tail_id != 0) {
		node->prev = arch_spin_decode_tail(tail_id);
		WRITE_ONCE(node->prev->next, node);
	}

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(old, node);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	/* Spin on the CPU local node->prev pointer */
	if (tail_id != 0) {
		count = spin_retry;
		while (READ_ONCE(node->prev) != NULL) {
			if (count-- >= 0)
				continue;
			count = spin_retry;
			/* Query running state of lock holder again. */
			owner = arch_spin_yield_target(old, node);
			if (owner && arch_vcpu_is_preempted(owner - 1))
				smp_yield_cpu(owner - 1);
		}
	}

	/* Spin on the lock value in the spinlock_t */
	count = spin_retry;
	while (1) {
		old = READ_ONCE(lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		if (!owner) {
			tail_id = old & _Q_TAIL_MASK;
			new = ((tail_id != node_id) ? tail_id : 0) | lockval;
			if (__atomic_cmpxchg_bool(&lp->lock, old, new))
				/* Got the lock */
				break;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}

	/* Pass lock_spin job to next CPU in the queue */
	if (node_id && tail_id != node_id) {
		/* Wait until the next CPU has set up the 'next' pointer */
		while ((next = READ_ONCE(node->next)) == NULL)
			;
		next->prev = NULL;
	}

out:
	S390_lowcore.spinlock_index--;
}
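The steal limit falls out of the two counter bits: a free-but-queued lock may be taken only while the counter is below _Q_LOCK_STEAL_MASK, so after three steals the queue head is guaranteed to win. As a one-function illustration (not part of the commit):

/* Illustration only: whether a free lock may still be stolen. */
static int may_steal(int old)
{
	return (old & _Q_LOCK_CPU_MASK) == 0 &&
	       (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK;
}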

static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(ACCESS_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_cmpxchg_niai8(&lp->lock, old, new))
				/* Got the lock */
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
	/* Use classic spinlocks + niai if the steal time is >= 10% */
	if (test_cpu_flag(CIF_DEDICATED_CPU))
		arch_spin_lock_queued(lp);
	else
		arch_spin_lock_classic(lp);
}
EXPORT_SYMBOL(arch_spin_lock_wait);

void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	local_irq_restore(flags);

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
@@ -148,126 +266,59 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

void _raw_read_lock_wait(arch_rwlock_t *rw)
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	if (unlikely(in_interrupt())) {
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
	/* Remove this reader again to allow recursive read locking */
	__atomic_add_const(-1, &rw->cnts);
	/* Put the reader into the wait queue */
	arch_spin_lock(&rw->wait);
	/* Now add this reader to the count value again */
	__atomic_add_const(1, &rw->cnts);
	/* Loop until the writer is done */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);

void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(_raw_write_lock_wait);
EXPORT_SYMBOL(arch_write_lock_wait);
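The bit layout of rw->cnts implied by the code above (reader count in the low halfword, 0x10000 as the writer bit, 0x20000 per queued writer) can be written out, illustratively, as:

/* Assumed rw->cnts layout, inferred from the code above:
 *   bits  0..15: number of active readers
 *   bit      16: a writer holds the lock (the 0x10000 tests)
 *   bits 17... : number of queued writers (__atomic_add(0x20000, ...))
 * so (cnts & 0x1ffff) == 0 means "no readers, no writer". */
#define RW_READER_BIAS		0x00001
#define RW_WRITER_BIT		0x10000
#define RW_WRITE_WAIT_BIAS	0x20000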

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
void arch_spin_relax(arch_spinlock_t *lp)
{
	int count = spin_retry;
	int owner, old, prev;
	int cpu;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

void arch_lock_relax(int cpu)
{
	cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1))
		return;
	smp_yield_cpu(~cpu);
	smp_yield_cpu(cpu - 1);
}
EXPORT_SYMBOL(arch_lock_relax);
EXPORT_SYMBOL(arch_spin_relax);
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(strlen);
 *
 * returns the minimum of the length of @s and @n
 */
size_t strnlen(const char * s, size_t n)
size_t strnlen(const char *s, size_t n)
{
	return __strnend(s, n) - s;
}
@@ -195,14 +195,14 @@ EXPORT_SYMBOL(strncat);

/**
 * strcmp - Compare two strings
 * @cs: One string
 * @ct: Another string
 * @s1: One string
 * @s2: Another string
 *
 * returns 0 if @cs and @ct are equal,
 * < 0 if @cs is less than @ct
 * > 0 if @cs is greater than @ct
 * returns 0 if @s1 and @s2 are equal,
 * < 0 if @s1 is less than @s2
 * > 0 if @s1 is greater than @s2
 */
int strcmp(const char *cs, const char *ct)
int strcmp(const char *s1, const char *s2)
{
	register int r0 asm("0") = 0;
	int ret = 0;
@@ -214,7 +214,7 @@ int strcmp(const char *cs, const char *ct)
		"	ic	%1,0(%3)\n"
		"	sr	%0,%1\n"
		"1:"
		: "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
		: "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2)
		: : "cc", "memory");
	return ret;
}
@@ -225,7 +225,7 @@ EXPORT_SYMBOL(strcmp);
 * @s: The string to be searched
 * @c: The character to search for
 */
char * strrchr(const char * s, int c)
char *strrchr(const char *s, int c)
{
	size_t len = __strend(s) - s;

@@ -261,7 +261,7 @@ static inline int clcle(const char *s1, unsigned long l1,
 * @s1: The string to be searched
 * @s2: The string to search for
 */
char * strstr(const char * s1,const char * s2)
char *strstr(const char *s1, const char *s2)
{
	int l1, l2;

@@ -307,15 +307,15 @@ EXPORT_SYMBOL(memchr);

/**
 * memcmp - Compare two areas of memory
 * @cs: One area of memory
 * @ct: Another area of memory
 * @s1: One area of memory
 * @s2: Another area of memory
 * @count: The size of the area.
 */
int memcmp(const void *cs, const void *ct, size_t n)
int memcmp(const void *s1, const void *s2, size_t n)
{
	int ret;

	ret = clcle(cs, n, ct, n);
	ret = clcle(s1, n, s2, n);
	if (ret)
		ret = ret == 1 ? -1 : 1;
	return ret;
@@ -145,8 +145,8 @@ void __init mem_init(void)

void free_initmem(void)
{
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}
@@ -159,13 +159,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}
@@ -222,12 +222,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
@@ -60,7 +60,7 @@ pte_t __ref *vmem_pte_alloc(void)
	pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

@@ -403,17 +403,17 @@ void __init vmem_map_init(void)

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
		(unsigned long)(__end_rodata - _stext) >> 10);
}

/*
@@ -53,10 +53,13 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 *
 * We get 160 bytes stack space from the calling function, but only use
 * 12 * 8 bytes for old backchain, r15..r6, and tail_call_cnt.
 *
 * The stack size used by the BPF program ("BPF stack" above) is passed
 * via "aux->stack_depth".
 */
#define STK_SPACE	(MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
#define STK_SPACE_ADD	(8 + 8 + 4 + 4 + 160)
#define STK_160_UNUSED	(160 - 12 * 8)
#define STK_OFF		(STK_SPACE - STK_160_UNUSED)
#define STK_OFF		(STK_SPACE_ADD - STK_160_UNUSED)
#define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
#define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
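With these defines, the frame a program pays for is STK_OFF plus its tracked stack usage rather than the full MAX_BPF_STACK: a program with stack_depth 64 needs 120 + 64 = 184 bytes where the old scheme reserved 120 + 512 = 632. A small sketch of that computation:

/* Illustrative helper, mirroring the prologue's
 * "aghi %r15,-(STK_OFF + stack_depth)" displacement below. */
static u32 bpf_frame_off(u32 stack_depth)
{
	return STK_OFF + stack_depth;	/* 120 + stack_depth bytes */
}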
@@ -320,12 +320,12 @@ static void save_regs(struct bpf_jit *jit, u32 rs, u32 re)
/*
 * Restore registers from "rs" (register start) to "re" (register end) on stack
 */
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re)
static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth)
{
	u32 off = STK_OFF_R6 + (rs - 6) * 8;

	if (jit->seen & SEEN_STACK)
		off += STK_OFF;
		off += STK_OFF + stack_depth;

	if (rs == re)
		/* lg %rs,off(%r15) */
@@ -369,7 +369,7 @@ static int get_end(struct bpf_jit *jit, int start)
 * Save and restore clobbered registers (6-15) on stack.
 * We save/restore registers in chunks with gap >= 2 registers.
 */
static void save_restore_regs(struct bpf_jit *jit, int op)
static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
{

	int re = 6, rs;
@@ -382,7 +382,7 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
		if (op == REGS_SAVE)
			save_regs(jit, rs, re);
		else
			restore_regs(jit, rs, re);
			restore_regs(jit, rs, re, stack_depth);
		re++;
	} while (re <= 15);
}
@@ -414,7 +414,7 @@ static void emit_load_skb_data_hlen(struct bpf_jit *jit)
 * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
 */
static void bpf_jit_prologue(struct bpf_jit *jit)
static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
{
	if (jit->seen & SEEN_TAIL_CALL) {
		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -427,7 +427,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
	/* Tail calls have to skip above initialization */
	jit->tail_call_start = jit->prg;
	/* Save registers */
	save_restore_regs(jit, REGS_SAVE);
	save_restore_regs(jit, REGS_SAVE, stack_depth);
	/* Setup literal pool */
	if (jit->seen & SEEN_LITERAL) {
		/* basr %r13,0 */
@@ -442,7 +442,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
	/* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
	EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
	/* aghi %r15,-STK_OFF */
	EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
	EMIT4_IMM(0xa70b0000, REG_15, -(STK_OFF + stack_depth));
	if (jit->seen & SEEN_FUNC)
		/* stg %w1,152(%r15) (backchain) */
		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
@@ -459,7 +459,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
/*
 * Function epilogue
 */
static void bpf_jit_epilogue(struct bpf_jit *jit)
static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
{
	/* Return 0 */
	if (jit->seen & SEEN_RET0) {
@@ -471,7 +471,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
	/* Load exit code: lgr %r2,%b0 */
	EMIT4(0xb9040000, REG_2, BPF_REG_0);
	/* Restore registers */
	save_restore_regs(jit, REGS_RESTORE);
	save_restore_regs(jit, REGS_RESTORE, stack_depth);
	/* br %r14 */
	_EMIT2(0x07fe);
}
@@ -1019,7 +1019,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
	 */

	if (jit->seen & SEEN_STACK)
		off = STK_OFF_TCCNT + STK_OFF;
		off = STK_OFF_TCCNT + STK_OFF + fp->aux->stack_depth;
	else
		off = STK_OFF_TCCNT;
	/* lhi %w0,1 */
@@ -1047,7 +1047,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
	/*
	 * Restore registers before calling function
	 */
	save_restore_regs(jit, REGS_RESTORE);
	save_restore_regs(jit, REGS_RESTORE, fp->aux->stack_depth);

	/*
	 * goto *(prog->bpf_func + tail_call_start);
@@ -1273,7 +1273,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
	jit->lit = jit->lit_start;
	jit->prg = 0;

	bpf_jit_prologue(jit);
	bpf_jit_prologue(jit, fp->aux->stack_depth);
	for (i = 0; i < fp->len; i += insn_count) {
		insn_count = bpf_jit_insn(jit, fp, i);
		if (insn_count < 0)
@@ -1281,7 +1281,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
		/* Next instruction address */
		jit->addrs[i + insn_count] = jit->prg;
	}
	bpf_jit_epilogue(jit);
	bpf_jit_epilogue(jit, fp->aux->stack_depth);

	jit->lit_start = jit->prg;
	jit->size = jit->lit;
@@ -368,7 +368,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
			/* End of second scan with interrupts on. */
			break;
		/* First scan complete, reenable interrupts. */
		zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
		if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
			break;
		si = 0;
		continue;
	}
@@ -956,7 +957,7 @@ static int __init pci_base_init(void)
	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71) || !test_facility(72))
	if (!test_facility(69) || !test_facility(71))
		return 0;

	rc = zpci_debug_init();
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/processor.h>
@@ -91,11 +92,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
}

/* Set Interruption Controls */
void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
	if (!test_facility(72))
		return -EIO;
	asm volatile(
		"	.insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
	return 0;
}

/* PCI Load */
@@ -4,11 +4,21 @@
#

hostprogs-y			+= gen_facilities
hostprogs-y			+= gen_opcode_table

HOSTCFLAGS_gen_facilities.o	+= -Wall $(LINUXINCLUDE)
HOSTCFLAGS_gen_opcode_table.o	+= -Wall $(LINUXINCLUDE)

define filechk_facilities.h
	$(obj)/gen_facilities
endef

define filechk_dis.h
	( $(obj)/gen_opcode_table < $(srctree)/arch/$(ARCH)/tools/opcodes.txt )
endef

include/generated/facilities.h: $(obj)/gen_facilities FORCE
	$(call filechk,facilities.h)

include/generated/dis.h: $(obj)/gen_opcode_table FORCE
	$(call filechk,dis.h)
arch/s390/tools/gen_opcode_table.c (new file, 336 lines)
@@ -0,0 +1,336 @@
/*
 * Generate opcode table initializers for the in-kernel disassembler.
 *
 * Copyright IBM Corp. 2017
 *
 */

#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>

#define STRING_SIZE_MAX 20

struct insn_type {
	unsigned char byte;
	unsigned char mask;
	char **format;
};

struct insn {
	struct insn_type *type;
	char opcode[STRING_SIZE_MAX];
	char name[STRING_SIZE_MAX];
	char upper[STRING_SIZE_MAX];
	char format[STRING_SIZE_MAX];
	unsigned int name_len;
};

struct insn_group {
	struct insn_type *type;
	int offset;
	int count;
	char opcode[2];
};

struct insn_format {
	char *format;
	int type;
};

struct gen_opcode {
	struct insn *insn;
	int nr;
	struct insn_group *group;
	int nr_groups;
};

/*
 * Table of instruction format types. Each opcode is defined with at
 * least one byte (two nibbles), three nibbles, or two bytes (four
 * nibbles).
 * The byte member of each instruction format type entry defines
 * within which byte of an instruction the third (and fourth) nibble
 * of an opcode can be found. The mask member is the and-mask that
 * needs to be applied on this byte in order to get the third (and
 * fourth) nibble of the opcode.
 * The format array defines all instruction formats (as defined in the
 * Principles of Operation) which have the same position of the opcode
 * nibbles.
 * A special case is instruction formats with 1-byte opcodes. In this
 * case the byte member is always zero, so that the mask is applied on
 * the (only) byte that contains the opcode.
 */
static struct insn_type insn_type_table[] = {
	{
		.byte = 0,
		.mask = 0xff,
		.format = (char *[]) {
			"MII",
			"RR",
			"RS",
			"RSI",
			"RX",
			"SI",
			"SMI",
			"SS",
			NULL,
		},
	},
	{
		.byte = 1,
		.mask = 0x0f,
		.format = (char *[]) {
			"RI",
			"RIL",
			"SSF",
			NULL,
		},
	},
	{
		.byte = 1,
		.mask = 0xff,
		.format = (char *[]) {
			"E",
			"IE",
			"RRE",
			"RRF",
			"RRR",
			"S",
			"SIL",
			"SSE",
			NULL,
		},
	},
	{
		.byte = 5,
		.mask = 0xff,
		.format = (char *[]) {
			"RIE",
			"RIS",
			"RRS",
			"RSE",
			"RSL",
			"RSY",
			"RXE",
			"RXF",
			"RXY",
			"SIY",
			"VRI",
			"VRR",
			"VRS",
			"VRV",
			"VRX",
			"VSI",
			NULL,
		},
	},
};
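As a worked (hypothetical) example of the mapping: an input line such as `b2fa niai IE` has a two-byte opcode, so its base format `IE` selects the byte = 1, mask = 0xff entry; the group key becomes the first opcode byte (0xb2) and the generated table entry keeps only the second byte as the opcode fragment. The index 42 and the exact format token below are invented for illustration:

	[  42] = { .opfrag = 0xfa, .format = INSTR_IE, .name = "niai" }, \
	{ .opcode = 0xb2, .mask = 0xff, .byte = 1, .offset = 42, .count = ... }, \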

static struct insn_type *insn_format_to_type(char *format)
{
	char tmp[STRING_SIZE_MAX];
	char *base_format, **ptr;
	int i;

	strcpy(tmp, format);
	base_format = tmp;
	base_format = strsep(&base_format, "_");
	for (i = 0; i < sizeof(insn_type_table) / sizeof(insn_type_table[0]); i++) {
		ptr = insn_type_table[i].format;
		while (*ptr) {
			if (!strcmp(base_format, *ptr))
				return &insn_type_table[i];
			ptr++;
		}
	}
	exit(EXIT_FAILURE);
}

static void read_instructions(struct gen_opcode *desc)
{
	struct insn insn;
	int rc, i;

	while (1) {
		rc = scanf("%s %s %s", insn.opcode, insn.name, insn.format);
		if (rc == EOF)
			break;
		if (rc != 3)
			exit(EXIT_FAILURE);
		insn.type = insn_format_to_type(insn.format);
		insn.name_len = strlen(insn.name);
		for (i = 0; i <= insn.name_len; i++)
			insn.upper[i] = toupper((unsigned char)insn.name[i]);
		desc->nr++;
		desc->insn = realloc(desc->insn, desc->nr * sizeof(*desc->insn));
		if (!desc->insn)
			exit(EXIT_FAILURE);
		desc->insn[desc->nr - 1] = insn;
	}
}

static int cmpformat(const void *a, const void *b)
{
	return strcmp(((struct insn *)a)->format, ((struct insn *)b)->format);
}

static void print_formats(struct gen_opcode *desc)
{
	char *format;
	int i, count;

	qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpformat);
	format = "";
	count = 0;
	printf("enum {\n");
	for (i = 0; i < desc->nr; i++) {
		if (!strcmp(format, desc->insn[i].format))
			continue;
		count++;
		format = desc->insn[i].format;
		printf("\tINSTR_%s,\n", format);
	}
	printf("}; /* %d */\n\n", count);
}

static int cmp_long_insn(const void *a, const void *b)
{
	return strcmp(((struct insn *)a)->name, ((struct insn *)b)->name);
}

static void print_long_insn(struct gen_opcode *desc)
{
	struct insn *insn;
	int i, count;

	qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmp_long_insn);
	count = 0;
	printf("enum {\n");
	for (i = 0; i < desc->nr; i++) {
		insn = &desc->insn[i];
		if (insn->name_len < 6)
			continue;
		printf("\tLONG_INSN_%s,\n", insn->upper);
		count++;
	}
	printf("}; /* %d */\n\n", count);

	printf("#define LONG_INSN_INITIALIZER { \\\n");
	for (i = 0; i < desc->nr; i++) {
		insn = &desc->insn[i];
		if (insn->name_len < 6)
			continue;
		printf("\t[LONG_INSN_%s] = \"%s\", \\\n", insn->upper, insn->name);
	}
	printf("}\n\n");
}

static void print_opcode(struct insn *insn, int nr)
{
	char *opcode;

	opcode = insn->opcode;
	if (insn->type->byte != 0)
		opcode += 2;
	printf("\t[%4d] = { .opfrag = 0x%s, .format = INSTR_%s, ", nr, opcode, insn->format);
	if (insn->name_len < 6)
		printf(".name = \"%s\" ", insn->name);
	else
		printf(".offset = LONG_INSN_%s ", insn->upper);
	printf("}, \\\n");
}

static void add_to_group(struct gen_opcode *desc, struct insn *insn, int offset)
{
	struct insn_group *group;

	group = desc->group ? &desc->group[desc->nr_groups - 1] : NULL;
	if (group && (!strncmp(group->opcode, insn->opcode, 2) || group->type->byte == 0)) {
		group->count++;
		return;
	}
	desc->nr_groups++;
	desc->group = realloc(desc->group, desc->nr_groups * sizeof(*desc->group));
	if (!desc->group)
		exit(EXIT_FAILURE);
	group = &desc->group[desc->nr_groups - 1];
	strncpy(group->opcode, insn->opcode, 2);
	group->type = insn->type;
	group->offset = offset;
	group->count = 1;
}

static int cmpopcode(const void *a, const void *b)
{
	return strcmp(((struct insn *)a)->opcode, ((struct insn *)b)->opcode);
}

static void print_opcode_table(struct gen_opcode *desc)
{
	char opcode[2] = "";
	struct insn *insn;
	int i, offset;

	qsort(desc->insn, desc->nr, sizeof(*desc->insn), cmpopcode);
	printf("#define OPCODE_TABLE_INITIALIZER { \\\n");
	offset = 0;
	for (i = 0; i < desc->nr; i++) {
		insn = &desc->insn[i];
		if (insn->type->byte == 0)
			continue;
		add_to_group(desc, insn, offset);
		if (strncmp(opcode, insn->opcode, 2)) {
			strncpy(opcode, insn->opcode, 2);
			printf("\t/* %.2s */ \\\n", opcode);
		}
		print_opcode(insn, offset);
		offset++;
	}
	printf("\t/* 1-byte opcode instructions */ \\\n");
	for (i = 0; i < desc->nr; i++) {
		insn = &desc->insn[i];
		if (insn->type->byte != 0)
			continue;
		add_to_group(desc, insn, offset);
		print_opcode(insn, offset);
		offset++;
	}
	printf("}\n\n");
}

static void print_opcode_table_offsets(struct gen_opcode *desc)
{
	struct insn_group *group;
	int i;

	printf("#define OPCODE_OFFSET_INITIALIZER { \\\n");
	for (i = 0; i < desc->nr_groups; i++) {
		group = &desc->group[i];
		printf("\t{ .opcode = 0x%.2s, .mask = 0x%02x, .byte = %d, .offset = %d, .count = %d }, \\\n",
		       group->opcode, group->type->mask, group->type->byte, group->offset, group->count);
	}
	printf("}\n\n");
}

int main(int argc, char **argv)
{
	struct gen_opcode _desc = { 0 };
	struct gen_opcode *desc = &_desc;

	read_instructions(desc);
	printf("#ifndef __S390_GENERATED_DIS_H__\n");
	printf("#define __S390_GENERATED_DIS_H__\n");
	printf("/*\n");
	printf(" * DO NOT MODIFY.\n");
	printf(" *\n");
	printf(" * This file was generated by %s\n", __FILE__);
	printf(" */\n\n");
	print_formats(desc);
	print_long_insn(desc);
	print_opcode_table(desc);
	print_opcode_table_offsets(desc);
	printf("#endif\n");
	exit(EXIT_SUCCESS);
}
arch/s390/tools/opcodes.txt (new file, 1183 lines; diff not shown because it is too large)
@@ -296,7 +296,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timeval tv;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
@@ -310,9 +310,9 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device,

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strncpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

@@ -340,7 +340,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,
{
	int data_size;
	int snss_rc;
	struct timeval tv;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
@@ -353,9 +353,9 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device,

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strncpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

|
@ -96,14 +96,6 @@ do { \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) \
|
||||
do { \
|
||||
debug_sprintf_exception(d_device->debug_area, \
|
||||
d_level, \
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
#define DBF_EVENT(d_level, d_str, d_data...)\
|
||||
do { \
|
||||
debug_sprintf_event(dasd_debug_area, \
|
||||
@ -122,14 +114,6 @@ do { \
|
||||
__dev_id.ssid, __dev_id.devno, d_data); \
|
||||
} while (0)
|
||||
|
||||
#define DBF_EXC(d_level, d_str, d_data...)\
|
||||
do { \
|
||||
debug_sprintf_exception(dasd_debug_area, \
|
||||
d_level,\
|
||||
d_str "\n", \
|
||||
d_data); \
|
||||
} while(0)
|
||||
|
||||
/* limit size for an errorstring */
|
||||
#define ERRORLENGTH 30
|
||||
|
||||
|
@ -56,13 +56,7 @@ extern debug_info_t *scm_debug;
|
||||
|
||||
static inline void SCM_LOG_HEX(int level, void *data, int length)
|
||||
{
|
||||
if (!debug_level_enabled(scm_debug, level))
|
||||
return;
|
||||
while (length > 0) {
|
||||
debug_event(scm_debug, level, data, length);
|
||||
length -= scm_debug->buf_size;
|
||||
data += scm_debug->buf_size;
|
||||
}
|
||||
debug_event(scm_debug, level, data, length);
|
||||
}
|
||||
|
||||
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
|
||||
|
@@ -211,11 +211,8 @@ sclp_console_write(struct console *console, const char *message,
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 &&
	    !timer_pending(&sclp_con_timer)) {
		init_timer(&sclp_con_timer);
		sclp_con_timer.function = sclp_console_timeout;
		sclp_con_timer.data = 0UL;
		sclp_con_timer.expires = jiffies + HZ/10;
		add_timer(&sclp_con_timer);
		setup_timer(&sclp_con_timer, sclp_console_timeout, 0UL);
		mod_timer(&sclp_con_timer, jiffies + HZ / 10);
	}
out:
	spin_unlock_irqrestore(&sclp_con_lock, flags);
@@ -218,11 +218,8 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa
	/* Setup timer to output current console buffer after 1/10 second */
	if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) &&
	    !timer_pending(&sclp_tty_timer)) {
		init_timer(&sclp_tty_timer);
		sclp_tty_timer.function = sclp_tty_timeout;
		sclp_tty_timer.data = 0UL;
		sclp_tty_timer.expires = jiffies + HZ/10;
		add_timer(&sclp_tty_timer);
		setup_timer(&sclp_tty_timer, sclp_tty_timeout, 0UL);
		mod_timer(&sclp_tty_timer, jiffies + HZ / 10);
	}
	spin_unlock_irqrestore(&sclp_tty_lock, flags);
out:
@@ -68,9 +68,8 @@ struct tape_class_device *register_tape_dev(

	tcd->char_device->owner = fops->owner;
	tcd->char_device->ops = fops;
	tcd->char_device->dev = dev;

	rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1);
	rc = cdev_add(tcd->char_device, dev, 1);
	if (rc)
		goto fail_with_cdev;

@@ -812,8 +812,7 @@ static int vmlogrdr_register_cdev(dev_t dev)
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR);
	if (!rc)
		return 0;

@@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	refcount_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
@@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd)

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
	refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
@@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

@@ -892,10 +892,9 @@ static int ur_set_online(struct ccw_device *cdev)
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
@@ -946,7 +945,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
	if (!force && (refcount_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
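The atomic_t to refcount_t conversion buys saturation and underflow checking for free; the resulting get/put pattern, sketched generically (illustrative, mirroring urdev_get()/urdev_put() above):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;
};

static void obj_get(struct obj *o)
{
	refcount_inc(&o->ref);	/* WARNs and saturates instead of wrapping */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))
		kfree(o);	/* last reference gone */
}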
@@ -12,6 +12,8 @@
#ifndef _VMUR_H_
#define _VMUR_H_

#include <linux/refcount.h>

#define DEV_CLASS_UR_I 0x20	/* diag210 unit record input device class */
#define DEV_CLASS_UR_O 0x10	/* diag210 unit record output device class */
/*
@@ -70,7 +72,7 @@ struct urdev {
	size_t reclen;			/* Record length for *write* CCWs */
	int class;			/* VM device class */
	int io_request_rc;		/* return code from I/O request */
	atomic_t ref_count;		/* reference counter */
	refcount_t ref_count;		/* reference counter */
	wait_queue_head_t wait;		/* wait queue to serialize open */
	int open_flag;			/* "urdev is open" flag */
	spinlock_t open_lock;		/* serialize critical sections */
Some files were not shown because too many files have changed in this diff.