s390 updates for 6.6 merge window
Merge tag 's390-6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

 - Add vfio-ap support to pass through crypto devices to secure
   execution guests

 - Add API ordinal 6 support to the zcrypt_ep11misc device driver, which
   is required to handle key generate and key derive (e.g. secure key to
   protected key) correctly

 - Add missing secure/has_secure sysfs files for the case where it is
   not possible to figure out where a system has been booted from;
   existing user space relies on these files always being present

 - Fix DCSS block device driver list corruption, caused by incorrect
   error handling

 - Convert virt_to_pfn() and pfn_to_virt() from defines to static inline
   functions to enforce type checking (a sketch of this pattern follows
   the commit metadata below)

 - Cleanups, improvements, and minor fixes to the kernel mapping setup

 - Fix various virtual vs physical address confusions

 - Move pfault code to a separate file, since it has nothing to do with
   regular fault handling

 - Move s390 documentation to Documentation/arch/ as has already been
   done for other architectures

 - Add HAVE_FUNCTION_GRAPH_RETVAL support

 - Factor out the s390_hypfs filesystem and add a new config option for
   it. The filesystem is deprecated, and as soon as all users are gone
   it can be removed some time in the not so near future

 - Remove support for old CEX2 and CEX3 crypto cards from the zcrypt
   device driver

 - Add support for user-defined certificates: receive user-defined
   certificates with a diagnose call and provide them via the
   'cert_store' keyring to user space

 - A couple of other small fixes and improvements all over the place

* tag 's390-6.6-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (66 commits)
  s390/pci: use builtin_misc_device macro to simplify the code
  s390/vfio-ap: make sure nib is shared
  KVM: s390: export kvm_s390_pv*_is_protected functions
  s390/uv: export uv_pin_shared for direct usage
  s390/vfio-ap: check for TAPQ response codes 0x35 and 0x36
  s390/vfio-ap: handle queue state change in progress on reset
  s390/vfio-ap: use work struct to verify queue reset
  s390/vfio-ap: store entire AP queue status word with the queue object
  s390/vfio-ap: remove upper limit on wait for queue reset to complete
  s390/vfio-ap: allow deconfigured queue to be passed through to a guest
  s390/vfio-ap: wait for response code 05 to clear on queue reset
  s390/vfio-ap: clean up irq resources if possible
  s390/vfio-ap: no need to check the 'E' and 'I' bits in APQSW after TAPQ
  s390/ipl: refactor deprecated strncpy
  s390/ipl: fix virtual vs physical address confusion
  s390/zcrypt_ep11misc: support API ordinal 6 with empty pin-blob
  s390/paes: fix PKEY_TYPE_EP11_AES handling for secure keyblobs
  s390/pkey: fix PKEY_TYPE_EP11_AES handling for sysfs attributes
  s390/pkey: fix PKEY_TYPE_EP11_AES handling in PKEY_VERIFYKEY2 IOCTL
  s390/pkey: fix PKEY_TYPE_EP11_AES handling in PKEY_KBLOB2PROTK[23]
  ...
This commit is contained in commit e5b7ca09e9.
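A note on the virt_to_pfn()/pfn_to_virt() item above: converting address-conversion macros into static inline helpers is a common kernel pattern, and the sketch below shows only its general shape; it is not the verbatim s390 definition, and __pa(), __va() and PAGE_SHIFT are the usual kernel helpers assumed here.

/* Before: plain macros, no type checking (any integer-like argument is accepted) */
/* #define virt_to_pfn(kaddr)  (__pa(kaddr) >> PAGE_SHIFT)          */
/* #define pfn_to_virt(pfn)    __va((pfn) << PAGE_SHIFT)            */

/* After: typed static inline helpers; mixing up pfns and pointers now warns */
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}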
@ -553,7 +553,7 @@
			others).

	ccw_timeout_log [S390]
			See Documentation/s390/common_io.rst for details.
			See Documentation/arch/s390/common_io.rst for details.

	cgroup_disable= [KNL] Disable a particular controller or optional feature
			Format: {name of the controller(s) or feature(s) to disable}
@ -598,7 +598,7 @@
			Setting checkreqprot to 1 is deprecated.

	cio_ignore=     [S390]
			See Documentation/s390/common_io.rst for details.
			See Documentation/arch/s390/common_io.rst for details.

	clearcpuid=X[,X...] [X86]
			Disable CPUID feature X for the kernel. See
@ -21,7 +21,7 @@ implementation.
   parisc/index
   ../powerpc/index
   ../riscv/index
   ../s390/index
   s390/index
   sh/index
   sparc/index
   x86/index
@ -116,7 +116,7 @@ Here are the installation steps in detail:
	as a 3270, not a 3215.

	5. Run the 3270 configuration script config3270.  It is
	distributed in this same directory, Documentation/s390, as
	distributed in this same directory, Documentation/arch/s390, as
	config3270.sh.  Inspect the output script it produces,
	/tmp/mkdev3270, and then run that script.  This will create the
	necessary character special device files and make the necessary
@ -125,7 +125,7 @@ Here are the installation steps in detail:
	Then notify /sbin/init that /etc/inittab has changed, by issuing
	the telinit command with the q operand::

		cd Documentation/s390
		cd Documentation/arch/s390
		sh config3270.sh
		sh /tmp/mkdev3270
		telinit q
@ -39,7 +39,7 @@ some of them are ESA/390 platform specific.

Note:
In order to write a driver for S/390, you also need to look into the interface
described in Documentation/s390/driver-model.rst.
described in Documentation/arch/s390/driver-model.rst.

Note for porting drivers from 2.4:
@ -136,5 +136,5 @@ debugfs entries

The level of logging can be changed to be more or less verbose by piping to
/sys/kernel/debug/s390dbf/cio_*/level a number between 0 and 6; see the
documentation on the S/390 debug feature (Documentation/s390/s390dbf.rst)
documentation on the S/390 debug feature (Documentation/arch/s390/s390dbf.rst)
for details.
@ -40,7 +40,7 @@ For example:
Change the level of logging to be more or less verbose by piping
a number between 0 and 6 to /sys/kernel/debug/s390dbf/pci_*/level. For
details, see the documentation on the S/390 debug feature at
Documentation/s390/s390dbf.rst.
Documentation/arch/s390/s390dbf.rst.

Sysfs entries
=============
@ -440,6 +440,6 @@ Reference
1. ESA/s390 Principles of Operation manual (IBM Form. No. SA22-7832)
2. ESA/390 Common I/O Device Commands manual (IBM Form. No. SA22-7204)
3. https://en.wikipedia.org/wiki/Channel_I/O
4. Documentation/s390/cds.rst
4. Documentation/arch/s390/cds.rst
5. Documentation/driver-api/vfio.rst
6. Documentation/driver-api/vfio-mediated-device.rst
@ -27,7 +27,7 @@ not strictly considered I/O devices. They are considered here as well,
although they are not the focus of this document.

Some additional information can also be found in the kernel source under
Documentation/s390/driver-model.rst.
Documentation/arch/s390/driver-model.rst.

The css bus
===========
@ -38,7 +38,7 @@ into several categories:
* Standard I/O subchannels, for use by the system. They have a child
  device on the ccw bus and are described below.
* I/O subchannels bound to the vfio-ccw driver. See
  Documentation/s390/vfio-ccw.rst.
  Documentation/arch/s390/vfio-ccw.rst.
* Message subchannels. No Linux driver currently exists.
* CHSC subchannels (at most one). The chsc subchannel driver can be used
  to send asynchronous chsc commands.
@ -18605,7 +18605,7 @@ L:	linux-s390@vger.kernel.org
S:	Supported
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
F:	Documentation/driver-api/s390-drivers.rst
F:	Documentation/s390/
F:	Documentation/arch/s390/
F:	arch/s390/
F:	drivers/s390/
F:	drivers/watchdog/diag288_wdt.c
@ -18666,7 +18666,7 @@ M:	Niklas Schnelle <schnelle@linux.ibm.com>
M:	Gerald Schaefer <gerald.schaefer@linux.ibm.com>
L:	linux-s390@vger.kernel.org
S:	Supported
F:	Documentation/s390/pci.rst
F:	Documentation/arch/s390/pci.rst
F:	arch/s390/pci/
F:	drivers/pci/hotplug/s390_pci_hpc.c
@ -18683,7 +18683,7 @@ M:	Halil Pasic <pasic@linux.ibm.com>
M:	Jason Herne <jjherne@linux.ibm.com>
L:	linux-s390@vger.kernel.org
S:	Supported
F:	Documentation/s390/vfio-ap*
F:	Documentation/arch/s390/vfio-ap*
F:	drivers/s390/crypto/vfio_ap*

S390 VFIO-CCW DRIVER
@ -18693,7 +18693,7 @@ R:	Halil Pasic <pasic@linux.ibm.com>
L:	linux-s390@vger.kernel.org
L:	kvm@vger.kernel.org
S:	Supported
F:	Documentation/s390/vfio-ccw.rst
F:	Documentation/arch/s390/vfio-ccw.rst
F:	drivers/s390/cio/vfio_ccw*
F:	include/uapi/linux/vfio_ccw.h
@ -3,7 +3,7 @@ obj-y += kernel/
obj-y += mm/
obj-$(CONFIG_KVM) += kvm/
obj-y += crypto/
obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
obj-$(CONFIG_S390_HYPFS) += hypfs/
obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-y += net/
obj-$(CONFIG_PCI) += pci/
@ -174,6 +174,7 @@ config S390
	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_FUNCTION_ARG_ACCESS_API
	select HAVE_FUNCTION_ERROR_INJECTION
	select HAVE_FUNCTION_GRAPH_RETVAL
	select HAVE_FUNCTION_GRAPH_TRACER
	select HAVE_FUNCTION_TRACER
	select HAVE_GCC_PLUGINS
@ -512,6 +513,17 @@ config KEXEC_SIG
	  verification for the corresponding kernel image type being
	  loaded in order for this to work.

config CERT_STORE
	bool "Get user certificates via DIAG320"
	depends on KEYS
	select CRYPTO_LIB_SHA256
	help
	  Enable this option if you want to access user-provided secure boot
	  certificates via DIAG 0x320.

	  These certificates will be made available via the keyring named
	  'cert_store'.

config KERNEL_NOBP
	def_bool n
	prompt "Enable modified branch prediction for the kernel by default"
@ -743,9 +755,9 @@ config CRASH_DUMP
	  Crash dump kernels are loaded in the main kernel with kexec-tools
	  into a specially reserved region and then later executed after
	  a crash by kdump/kexec.
	  Refer to <file:Documentation/s390/zfcpdump.rst> for more details on this.
	  Refer to <file:Documentation/arch/s390/zfcpdump.rst> for more details on this.
	  This option also enables s390 zfcpdump.
	  See also <file:Documentation/s390/zfcpdump.rst>
	  See also <file:Documentation/arch/s390/zfcpdump.rst>

endmenu
@ -867,13 +879,24 @@ config APPLDATA_NET_SUM
	  This can also be compiled as a module, which will be called
	  appldata_net_sum.o.

config S390_HYPFS_FS
config S390_HYPFS
	def_bool y
	prompt "s390 hypervisor information"
	help
	  This provides several binary files at (debugfs)/s390_hypfs/ to
	  provide accounting information in an s390 hypervisor environment.

config S390_HYPFS_FS
	def_bool n
	prompt "s390 hypervisor file system support"
	select SYS_HYPERVISOR
	depends on S390_HYPFS
	help
	  This is a virtual file system intended to provide accounting
	  information in an s390 hypervisor environment.
	  information in an s390 hypervisor environment. This file system
	  is deprecated and should not be used.

	  Say N if you are unsure.

source "arch/s390/kvm/Kconfig"
|
||||
|
@ -119,7 +119,6 @@ export KBUILD_CFLAGS_DECOMPRESSOR
OBJCOPYFLAGS := -O binary

libs-y += arch/s390/lib/
drivers-y += drivers/s390/

boot := arch/s390/boot
syscalls := arch/s390/kernel/syscalls
@ -27,6 +27,7 @@ struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);

u64 __bootdata_preserved(stfle_fac_list[16]);
@ -176,6 +177,7 @@ static unsigned long setup_kernel_memory_layout(void)
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
@ -183,19 +185,19 @@ static unsigned long setup_kernel_memory_layout(void)
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
		    _REGION2_SIZE) {
	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
		MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
	vsize = size_add(vsize, vmalloc_size);
	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * forcing modules and vmalloc area under the ultravisor
	 * Forcing modules and vmalloc area under the ultravisor
	 * secure storage limit, so that any vmalloc allocation
	 * we do could be used to back secure guest storage.
	 */
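One point in the hunk above that is easy to miss: the vmalloc area size is folded into vsize with size_add() from <linux/overflow.h> rather than a plain '+', so an oversized setting (e.g. a huge vmalloc= value) saturates instead of wrapping around, and the comparison against _REGION2_SIZE stays meaningful. A rough sketch of what such a saturating helper does is shown below; it is an illustration only, the real size_add() is built on check_add_overflow().

/* Illustration of a saturating size addition (sketch, not the kernel source) */
static inline size_t size_add_sketch(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;	/* saturate instead of wrapping */
	return sum;
}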
@ -204,7 +206,7 @@ static unsigned long setup_kernel_memory_layout(void)
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - PAGE_SIZE, PAGE_SIZE);
	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
@ -220,8 +222,9 @@ static unsigned long setup_kernel_memory_layout(void)
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* maximum mappable address as seen by arch_get_mappable_range() */
	max_mappable = vmemmap_start;
	/* make sure identity map doesn't overlay with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
@ -286,8 +289,9 @@ void startup_kernel(void)

	setup_lpp();
	safe_addr = mem_safe_offset();

	/*
	 * reserve decompressor memory together with decompression heap, buffer and
	 * Reserve decompressor memory together with decompression heap, buffer and
	 * memory which might be occupied by uncompressed kernel at default 1Mb
	 * position (if KASLR is off or failed).
	 */
@ -835,6 +835,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
@ -787,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
CONFIG_FUNCTION_GRAPH_RETVAL=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_STACK_TRACER=y
@ -35,7 +35,7 @@
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE 16
#define PAES_MAX_KEYSIZE 320
#define PAES_MAX_KEYSIZE MAXEP11AESKEYBLOBSIZE

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);
@ -3,7 +3,12 @@
# Makefile for the linux hypfs filesystem routines.
#

obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
obj-$(CONFIG_S390_HYPFS) += hypfs_dbfs.o
obj-$(CONFIG_S390_HYPFS) += hypfs_diag.o
obj-$(CONFIG_S390_HYPFS) += hypfs_diag0c.o
obj-$(CONFIG_S390_HYPFS) += hypfs_sprp.o
obj-$(CONFIG_S390_HYPFS) += hypfs_vm.o

s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
s390_hypfs-objs += hypfs_diag0c.o
obj-$(CONFIG_S390_HYPFS_FS) += hypfs_diag_fs.o
obj-$(CONFIG_S390_HYPFS_FS) += hypfs_vm_fs.o
obj-$(CONFIG_S390_HYPFS_FS) += inode.o
@ -46,6 +46,15 @@ void hypfs_diag0c_exit(void);
void hypfs_sprp_init(void);
void hypfs_sprp_exit(void);

int __hypfs_fs_init(void);

static inline int hypfs_fs_init(void)
{
	if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
		return __hypfs_fs_init();
	return 0;
}

/* debugfs interface */
struct hypfs_dbfs_file;

@ -69,7 +78,6 @@ struct hypfs_dbfs_file {
	struct dentry *dentry;
};

extern void hypfs_dbfs_init(void);
extern void hypfs_dbfs_exit(void);
extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df);
extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df);
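The hypfs_fs_init() wrapper in the hunk above is an instance of the usual IS_ENABLED() compile-out pattern that the S390_HYPFS/S390_HYPFS_FS split relies on: the out-of-line filesystem entry point is only built when the option is set, and callers go through a static inline stub so no #ifdef is needed at the call site. A minimal sketch of the pattern, with hypothetical names (CONFIG_FOO_FS and the helpers below are illustrative, not from this diff):

/* Hypothetical illustration of the IS_ENABLED() compile-out pattern */
int __optional_feature_init(void);	/* only compiled when CONFIG_FOO_FS=y */

static inline int optional_feature_init(void)
{
	/*
	 * IS_ENABLED() expands to a compile-time constant, so with
	 * CONFIG_FOO_FS unset the call below is dead code and the
	 * out-of-line symbol is never referenced.
	 */
	if (IS_ENABLED(CONFIG_FOO_FS))
		return __optional_feature_init();
	return 0;
}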
@ -90,12 +90,33 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
	debugfs_remove(df->dentry);
}

void hypfs_dbfs_init(void)
static int __init hypfs_dbfs_init(void)
{
	dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
}
	int rc = -ENODATA;

void hypfs_dbfs_exit(void)
{
	dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
	if (hypfs_diag_init())
		goto fail_dbfs_exit;
	if (hypfs_vm_init())
		goto fail_hypfs_diag_exit;
	hypfs_sprp_init();
	if (hypfs_diag0c_init())
		goto fail_hypfs_sprp_exit;
	rc = hypfs_fs_init();
	if (rc)
		goto fail_hypfs_diag0c_exit;
	return 0;

fail_hypfs_diag0c_exit:
	hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
	hypfs_sprp_exit();
	hypfs_vm_exit();
fail_hypfs_diag_exit:
	hypfs_diag_exit();
	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
	debugfs_remove(dbfs_dir);
	return rc;
}
device_initcall(hypfs_dbfs_init)
@ -18,188 +18,27 @@
|
||||
#include <linux/mm.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include "hypfs_diag.h"
|
||||
#include "hypfs.h"
|
||||
|
||||
#define TMP_SIZE 64 /* size of temporary buffers */
|
||||
|
||||
#define DBFS_D204_HDR_VERSION 0
|
||||
|
||||
static char *diag224_cpu_names; /* diag 224 name table */
|
||||
static enum diag204_sc diag204_store_sc; /* used subcode for store */
|
||||
static enum diag204_format diag204_info_type; /* used diag 204 data format */
|
||||
|
||||
static void *diag204_buf; /* 4K aligned buffer for diag204 data */
|
||||
static void *diag204_buf_vmalloc; /* vmalloc pointer for diag204 data */
|
||||
static int diag204_buf_pages; /* number of pages for diag204 data */
|
||||
|
||||
static struct dentry *dbfs_d204_file;
|
||||
|
||||
/*
|
||||
* DIAG 204 member access functions.
|
||||
*
|
||||
* Since we have two different diag 204 data formats for old and new s390
|
||||
* machines, we do not access the structs directly, but use getter functions for
|
||||
* each struct member instead. This should make the code more readable.
|
||||
*/
|
||||
|
||||
/* Time information block */
|
||||
|
||||
static inline int info_blk_hdr__size(enum diag204_format type)
|
||||
enum diag204_format diag204_get_info_type(void)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_info_blk_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_info_blk_hdr);
|
||||
return diag204_info_type;
|
||||
}
|
||||
|
||||
static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
|
||||
static void diag204_set_info_type(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_info_blk_hdr *)hdr)->npar;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
|
||||
}
|
||||
|
||||
static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_info_blk_hdr *)hdr)->flags;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
|
||||
}
|
||||
|
||||
/* Partition header */
|
||||
|
||||
static inline int part_hdr__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_part_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_part_hdr);
|
||||
}
|
||||
|
||||
static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_part_hdr *)hdr)->cpus;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_part_hdr *)hdr)->rcpus;
|
||||
}
|
||||
|
||||
static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
|
||||
char *name)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
|
||||
DIAG204_LPAR_NAME_LEN);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
|
||||
DIAG204_LPAR_NAME_LEN);
|
||||
EBCASC(name, DIAG204_LPAR_NAME_LEN);
|
||||
name[DIAG204_LPAR_NAME_LEN] = 0;
|
||||
strim(name);
|
||||
}
|
||||
|
||||
/* CPU info block */
|
||||
|
||||
static inline int cpu_info__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_cpu_info);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_cpu_info);
|
||||
}
|
||||
|
||||
static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->ctidx;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->ctidx;
|
||||
}
|
||||
|
||||
static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->cpu_addr;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->acc_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->acc_time;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->lp_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->lp_time;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return 0; /* online_time not available in simple info */
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->online_time;
|
||||
}
|
||||
|
||||
/* Physical header */
|
||||
|
||||
static inline int phys_hdr__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_phys_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_phys_hdr);
|
||||
}
|
||||
|
||||
static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_hdr *)hdr)->cpus;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_hdr *)hdr)->cpus;
|
||||
}
|
||||
|
||||
/* Physical CPU info block */
|
||||
|
||||
static inline int phys_cpu__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_phys_cpu);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_phys_cpu);
|
||||
}
|
||||
|
||||
static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
|
||||
}
|
||||
|
||||
static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->mgm_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
|
||||
}
|
||||
|
||||
static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->ctidx;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
|
||||
diag204_info_type = type;
|
||||
}
|
||||
|
||||
/* Diagnose 204 functions */
|
||||
@ -212,43 +51,11 @@ static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
|
||||
|
||||
static void diag204_free_buffer(void)
|
||||
{
|
||||
if (!diag204_buf)
|
||||
return;
|
||||
if (diag204_buf_vmalloc) {
|
||||
vfree(diag204_buf_vmalloc);
|
||||
diag204_buf_vmalloc = NULL;
|
||||
} else {
|
||||
free_pages((unsigned long) diag204_buf, 0);
|
||||
}
|
||||
vfree(diag204_buf);
|
||||
diag204_buf = NULL;
|
||||
}
|
||||
|
||||
static void *page_align_ptr(void *ptr)
|
||||
{
|
||||
return (void *) PAGE_ALIGN((unsigned long) ptr);
|
||||
}
|
||||
|
||||
static void *diag204_alloc_vbuf(int pages)
|
||||
{
|
||||
/* The buffer has to be page aligned! */
|
||||
diag204_buf_vmalloc = vmalloc(array_size(PAGE_SIZE, (pages + 1)));
|
||||
if (!diag204_buf_vmalloc)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
diag204_buf = page_align_ptr(diag204_buf_vmalloc);
|
||||
diag204_buf_pages = pages;
|
||||
return diag204_buf;
|
||||
}
|
||||
|
||||
static void *diag204_alloc_rbuf(void)
|
||||
{
|
||||
diag204_buf = (void*)__get_free_pages(GFP_KERNEL,0);
|
||||
if (!diag204_buf)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
diag204_buf_pages = 1;
|
||||
return diag204_buf;
|
||||
}
|
||||
|
||||
static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
|
||||
void *diag204_get_buffer(enum diag204_format fmt, int *pages)
|
||||
{
|
||||
if (diag204_buf) {
|
||||
*pages = diag204_buf_pages;
|
||||
@ -256,15 +63,19 @@ static void *diag204_get_buffer(enum diag204_format fmt, int *pages)
|
||||
}
|
||||
if (fmt == DIAG204_INFO_SIMPLE) {
|
||||
*pages = 1;
|
||||
return diag204_alloc_rbuf();
|
||||
} else {/* DIAG204_INFO_EXT */
|
||||
*pages = diag204((unsigned long)DIAG204_SUBC_RSI |
|
||||
(unsigned long)DIAG204_INFO_EXT, 0, NULL);
|
||||
if (*pages <= 0)
|
||||
return ERR_PTR(-ENOSYS);
|
||||
else
|
||||
return diag204_alloc_vbuf(*pages);
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
diag204_buf = __vmalloc_node(array_size(*pages, PAGE_SIZE),
|
||||
PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
if (!diag204_buf)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
diag204_buf_pages = *pages;
|
||||
return diag204_buf;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -291,13 +102,13 @@ static int diag204_probe(void)
|
||||
if (diag204((unsigned long)DIAG204_SUBC_STIB7 |
|
||||
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
|
||||
diag204_store_sc = DIAG204_SUBC_STIB7;
|
||||
diag204_info_type = DIAG204_INFO_EXT;
|
||||
diag204_set_info_type(DIAG204_INFO_EXT);
|
||||
goto out;
|
||||
}
|
||||
if (diag204((unsigned long)DIAG204_SUBC_STIB6 |
|
||||
(unsigned long)DIAG204_INFO_EXT, pages, buf) >= 0) {
|
||||
diag204_store_sc = DIAG204_SUBC_STIB6;
|
||||
diag204_info_type = DIAG204_INFO_EXT;
|
||||
diag204_set_info_type(DIAG204_INFO_EXT);
|
||||
goto out;
|
||||
}
|
||||
diag204_free_buffer();
|
||||
@ -313,10 +124,10 @@ static int diag204_probe(void)
|
||||
if (diag204((unsigned long)DIAG204_SUBC_STIB4 |
|
||||
(unsigned long)DIAG204_INFO_SIMPLE, pages, buf) >= 0) {
|
||||
diag204_store_sc = DIAG204_SUBC_STIB4;
|
||||
diag204_info_type = DIAG204_INFO_SIMPLE;
|
||||
diag204_set_info_type(DIAG204_INFO_SIMPLE);
|
||||
goto out;
|
||||
} else {
|
||||
rc = -ENOSYS;
|
||||
rc = -EOPNOTSUPP;
|
||||
goto fail_store;
|
||||
}
|
||||
out:
|
||||
@ -327,58 +138,13 @@ fail_alloc:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int diag204_do_store(void *buf, int pages)
|
||||
int diag204_store(void *buf, int pages)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = diag204((unsigned long) diag204_store_sc |
|
||||
(unsigned long) diag204_info_type, pages, buf);
|
||||
return rc < 0 ? -ENOSYS : 0;
|
||||
}
|
||||
|
||||
static void *diag204_store(void)
|
||||
{
|
||||
void *buf;
|
||||
int pages, rc;
|
||||
|
||||
buf = diag204_get_buffer(diag204_info_type, &pages);
|
||||
if (IS_ERR(buf))
|
||||
goto out;
|
||||
rc = diag204_do_store(buf, pages);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
out:
|
||||
return buf;
|
||||
}
|
||||
|
||||
/* Diagnose 224 functions */
|
||||
|
||||
static int diag224_get_name_table(void)
|
||||
{
|
||||
/* memory must be below 2GB */
|
||||
diag224_cpu_names = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
|
||||
if (!diag224_cpu_names)
|
||||
return -ENOMEM;
|
||||
if (diag224(diag224_cpu_names)) {
|
||||
free_page((unsigned long) diag224_cpu_names);
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void diag224_delete_name_table(void)
|
||||
{
|
||||
free_page((unsigned long) diag224_cpu_names);
|
||||
}
|
||||
|
||||
static int diag224_idx2name(int index, char *name)
|
||||
{
|
||||
memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
|
||||
DIAG204_CPU_NAME_LEN);
|
||||
name[DIAG204_CPU_NAME_LEN] = 0;
|
||||
strim(name);
|
||||
return 0;
|
||||
rc = diag204((unsigned long)diag204_store_sc |
|
||||
(unsigned long)diag204_get_info_type(), pages, buf);
|
||||
return rc < 0 ? -EOPNOTSUPP : 0;
|
||||
}
|
||||
|
||||
struct dbfs_d204_hdr {
|
||||
@ -403,8 +169,8 @@ static int dbfs_d204_create(void **data, void **data_free_ptr, size_t *size)
|
||||
base = vzalloc(buf_size);
|
||||
if (!base)
|
||||
return -ENOMEM;
|
||||
d204 = page_align_ptr(base + sizeof(d204->hdr)) - sizeof(d204->hdr);
|
||||
rc = diag204_do_store(d204->buf, diag204_buf_pages);
|
||||
d204 = PTR_ALIGN(base + sizeof(d204->hdr), PAGE_SIZE) - sizeof(d204->hdr);
|
||||
rc = diag204_store(d204->buf, diag204_buf_pages);
|
||||
if (rc) {
|
||||
vfree(base);
|
||||
return rc;
|
||||
@ -433,176 +199,21 @@ __init int hypfs_diag_init(void)
|
||||
return -ENODATA;
|
||||
}
|
||||
|
||||
if (diag204_info_type == DIAG204_INFO_EXT)
|
||||
if (diag204_get_info_type() == DIAG204_INFO_EXT)
|
||||
hypfs_dbfs_create_file(&dbfs_file_d204);
|
||||
|
||||
if (MACHINE_IS_LPAR) {
|
||||
rc = diag224_get_name_table();
|
||||
if (rc) {
|
||||
pr_err("The hardware system does not provide all "
|
||||
"functions required by hypfs\n");
|
||||
debugfs_remove(dbfs_d204_file);
|
||||
return rc;
|
||||
}
|
||||
rc = hypfs_diag_fs_init();
|
||||
if (rc) {
|
||||
pr_err("The hardware system does not provide all functions required by hypfs\n");
|
||||
debugfs_remove(dbfs_d204_file);
|
||||
}
|
||||
return 0;
|
||||
return rc;
|
||||
}
|
||||
|
||||
void hypfs_diag_exit(void)
|
||||
{
|
||||
debugfs_remove(dbfs_d204_file);
|
||||
diag224_delete_name_table();
|
||||
hypfs_diag_fs_exit();
|
||||
diag204_free_buffer();
|
||||
hypfs_dbfs_remove_file(&dbfs_file_d204);
|
||||
}
|
||||
|
||||
/*
|
||||
* Functions to create the directory structure
|
||||
* *******************************************
|
||||
*/
|
||||
|
||||
static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
|
||||
{
|
||||
struct dentry *cpu_dir;
|
||||
char buffer[TMP_SIZE];
|
||||
void *rc;
|
||||
|
||||
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
|
||||
cpu_info));
|
||||
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
|
||||
rc = hypfs_create_u64(cpu_dir, "mgmtime",
|
||||
cpu_info__acc_time(diag204_info_type, cpu_info) -
|
||||
cpu_info__lp_time(diag204_info_type, cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
rc = hypfs_create_u64(cpu_dir, "cputime",
|
||||
cpu_info__lp_time(diag204_info_type, cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
if (diag204_info_type == DIAG204_INFO_EXT) {
|
||||
rc = hypfs_create_u64(cpu_dir, "onlinetime",
|
||||
cpu_info__online_time(diag204_info_type,
|
||||
cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
}
|
||||
diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
|
||||
rc = hypfs_create_str(cpu_dir, "type", buffer);
|
||||
return PTR_ERR_OR_ZERO(rc);
|
||||
}
|
||||
|
||||
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
|
||||
{
|
||||
struct dentry *cpus_dir;
|
||||
struct dentry *lpar_dir;
|
||||
char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
|
||||
void *cpu_info;
|
||||
int i;
|
||||
|
||||
part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
|
||||
lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
|
||||
lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
|
||||
if (IS_ERR(lpar_dir))
|
||||
return lpar_dir;
|
||||
cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return cpus_dir;
|
||||
cpu_info = part_hdr + part_hdr__size(diag204_info_type);
|
||||
for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
|
||||
int rc;
|
||||
rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
cpu_info += cpu_info__size(diag204_info_type);
|
||||
}
|
||||
return cpu_info;
|
||||
}
|
||||
|
||||
static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
|
||||
{
|
||||
struct dentry *cpu_dir;
|
||||
char buffer[TMP_SIZE];
|
||||
void *rc;
|
||||
|
||||
snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
|
||||
cpu_info));
|
||||
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
|
||||
if (IS_ERR(cpu_dir))
|
||||
return PTR_ERR(cpu_dir);
|
||||
rc = hypfs_create_u64(cpu_dir, "mgmtime",
|
||||
phys_cpu__mgm_time(diag204_info_type, cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
|
||||
rc = hypfs_create_str(cpu_dir, "type", buffer);
|
||||
return PTR_ERR_OR_ZERO(rc);
|
||||
}
|
||||
|
||||
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
|
||||
{
|
||||
int i;
|
||||
void *cpu_info;
|
||||
struct dentry *cpus_dir;
|
||||
|
||||
cpus_dir = hypfs_mkdir(parent_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return cpus_dir;
|
||||
cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
|
||||
for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
|
||||
int rc;
|
||||
rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
cpu_info += phys_cpu__size(diag204_info_type);
|
||||
}
|
||||
return cpu_info;
|
||||
}
|
||||
|
||||
int hypfs_diag_create_files(struct dentry *root)
|
||||
{
|
||||
struct dentry *systems_dir, *hyp_dir;
|
||||
void *time_hdr, *part_hdr;
|
||||
int i, rc;
|
||||
void *buffer, *ptr;
|
||||
|
||||
buffer = diag204_store();
|
||||
if (IS_ERR(buffer))
|
||||
return PTR_ERR(buffer);
|
||||
|
||||
systems_dir = hypfs_mkdir(root, "systems");
|
||||
if (IS_ERR(systems_dir)) {
|
||||
rc = PTR_ERR(systems_dir);
|
||||
goto err_out;
|
||||
}
|
||||
time_hdr = (struct x_info_blk_hdr *)buffer;
|
||||
part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
|
||||
for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
|
||||
part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
|
||||
if (IS_ERR(part_hdr)) {
|
||||
rc = PTR_ERR(part_hdr);
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
if (info_blk_hdr__flags(diag204_info_type, time_hdr) &
|
||||
DIAG204_LPAR_PHYS_FLG) {
|
||||
ptr = hypfs_create_phys_files(root, part_hdr);
|
||||
if (IS_ERR(ptr)) {
|
||||
rc = PTR_ERR(ptr);
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
hyp_dir = hypfs_mkdir(root, "hyp");
|
||||
if (IS_ERR(hyp_dir)) {
|
||||
rc = PTR_ERR(hyp_dir);
|
||||
goto err_out;
|
||||
}
|
||||
ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
|
||||
if (IS_ERR(ptr)) {
|
||||
rc = PTR_ERR(ptr);
|
||||
goto err_out;
|
||||
}
|
||||
rc = 0;
|
||||
|
||||
err_out:
|
||||
return rc;
|
||||
}
|
||||
|
arch/s390/hypfs/hypfs_diag.h (new file, 35 lines)
@ -0,0 +1,35 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
|
||||
* implementation.
|
||||
*
|
||||
* Copyright IBM Corp. 2006, 2008
|
||||
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
|
||||
*/
|
||||
|
||||
#ifndef _S390_HYPFS_DIAG_H_
|
||||
#define _S390_HYPFS_DIAG_H_
|
||||
|
||||
#include <asm/diag.h>
|
||||
|
||||
enum diag204_format diag204_get_info_type(void);
|
||||
void *diag204_get_buffer(enum diag204_format fmt, int *pages);
|
||||
int diag204_store(void *buf, int pages);
|
||||
|
||||
int __hypfs_diag_fs_init(void);
|
||||
void __hypfs_diag_fs_exit(void);
|
||||
|
||||
static inline int hypfs_diag_fs_init(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
|
||||
return __hypfs_diag_fs_init();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void hypfs_diag_fs_exit(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_S390_HYPFS_FS))
|
||||
__hypfs_diag_fs_exit();
|
||||
}
|
||||
|
||||
#endif /* _S390_HYPFS_DIAG_H_ */
|
arch/s390/hypfs/hypfs_diag_fs.c (new file, 393 lines)
@ -0,0 +1,393 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
|
||||
* implementation.
|
||||
*
|
||||
* Copyright IBM Corp. 2006, 2008
|
||||
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
|
||||
*/
|
||||
|
||||
#define KMSG_COMPONENT "hypfs"
|
||||
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mm.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include "hypfs_diag.h"
|
||||
#include "hypfs.h"
|
||||
|
||||
#define TMP_SIZE 64 /* size of temporary buffers */
|
||||
|
||||
static char *diag224_cpu_names; /* diag 224 name table */
|
||||
static int diag224_idx2name(int index, char *name);
|
||||
|
||||
/*
|
||||
* DIAG 204 member access functions.
|
||||
*
|
||||
* Since we have two different diag 204 data formats for old and new s390
|
||||
* machines, we do not access the structs directly, but use getter functions for
|
||||
* each struct member instead. This should make the code more readable.
|
||||
*/
|
||||
|
||||
/* Time information block */
|
||||
|
||||
static inline int info_blk_hdr__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_info_blk_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_info_blk_hdr);
|
||||
}
|
||||
|
||||
static inline __u8 info_blk_hdr__npar(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_info_blk_hdr *)hdr)->npar;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_info_blk_hdr *)hdr)->npar;
|
||||
}
|
||||
|
||||
static inline __u8 info_blk_hdr__flags(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_info_blk_hdr *)hdr)->flags;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_info_blk_hdr *)hdr)->flags;
|
||||
}
|
||||
|
||||
/* Partition header */
|
||||
|
||||
static inline int part_hdr__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_part_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_part_hdr);
|
||||
}
|
||||
|
||||
static inline __u8 part_hdr__rcpus(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_part_hdr *)hdr)->cpus;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_part_hdr *)hdr)->rcpus;
|
||||
}
|
||||
|
||||
static inline void part_hdr__part_name(enum diag204_format type, void *hdr,
|
||||
char *name)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
memcpy(name, ((struct diag204_part_hdr *)hdr)->part_name,
|
||||
DIAG204_LPAR_NAME_LEN);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
memcpy(name, ((struct diag204_x_part_hdr *)hdr)->part_name,
|
||||
DIAG204_LPAR_NAME_LEN);
|
||||
EBCASC(name, DIAG204_LPAR_NAME_LEN);
|
||||
name[DIAG204_LPAR_NAME_LEN] = 0;
|
||||
strim(name);
|
||||
}
|
||||
|
||||
/* CPU info block */
|
||||
|
||||
static inline int cpu_info__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_cpu_info);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_cpu_info);
|
||||
}
|
||||
|
||||
static inline __u8 cpu_info__ctidx(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->ctidx;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->ctidx;
|
||||
}
|
||||
|
||||
static inline __u16 cpu_info__cpu_addr(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->cpu_addr;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->cpu_addr;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__acc_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->acc_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->acc_time;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__lp_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_cpu_info *)hdr)->lp_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->lp_time;
|
||||
}
|
||||
|
||||
static inline __u64 cpu_info__online_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return 0; /* online_time not available in simple info */
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_cpu_info *)hdr)->online_time;
|
||||
}
|
||||
|
||||
/* Physical header */
|
||||
|
||||
static inline int phys_hdr__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_phys_hdr);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_phys_hdr);
|
||||
}
|
||||
|
||||
static inline __u8 phys_hdr__cpus(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_hdr *)hdr)->cpus;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_hdr *)hdr)->cpus;
|
||||
}
|
||||
|
||||
/* Physical CPU info block */
|
||||
|
||||
static inline int phys_cpu__size(enum diag204_format type)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return sizeof(struct diag204_phys_cpu);
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return sizeof(struct diag204_x_phys_cpu);
|
||||
}
|
||||
|
||||
static inline __u16 phys_cpu__cpu_addr(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->cpu_addr;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->cpu_addr;
|
||||
}
|
||||
|
||||
static inline __u64 phys_cpu__mgm_time(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->mgm_time;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->mgm_time;
|
||||
}
|
||||
|
||||
static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr)
|
||||
{
|
||||
if (type == DIAG204_INFO_SIMPLE)
|
||||
return ((struct diag204_phys_cpu *)hdr)->ctidx;
|
||||
else /* DIAG204_INFO_EXT */
|
||||
return ((struct diag204_x_phys_cpu *)hdr)->ctidx;
|
||||
}
|
||||
|
||||
/*
|
||||
* Functions to create the directory structure
|
||||
* *******************************************
|
||||
*/
|
||||
|
||||
static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
|
||||
{
|
||||
struct dentry *cpu_dir;
|
||||
char buffer[TMP_SIZE];
|
||||
void *rc;
|
||||
|
||||
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
|
||||
cpu_info));
|
||||
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
|
||||
rc = hypfs_create_u64(cpu_dir, "mgmtime",
|
||||
cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
|
||||
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
rc = hypfs_create_u64(cpu_dir, "cputime",
|
||||
cpu_info__lp_time(diag204_get_info_type(), cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
if (diag204_get_info_type() == DIAG204_INFO_EXT) {
|
||||
rc = hypfs_create_u64(cpu_dir, "onlinetime",
|
||||
cpu_info__online_time(diag204_get_info_type(),
|
||||
cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
}
|
||||
diag224_idx2name(cpu_info__ctidx(diag204_get_info_type(), cpu_info), buffer);
|
||||
rc = hypfs_create_str(cpu_dir, "type", buffer);
|
||||
return PTR_ERR_OR_ZERO(rc);
|
||||
}
|
||||
|
||||
static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
|
||||
{
|
||||
struct dentry *cpus_dir;
|
||||
struct dentry *lpar_dir;
|
||||
char lpar_name[DIAG204_LPAR_NAME_LEN + 1];
|
||||
void *cpu_info;
|
||||
int i;
|
||||
|
||||
part_hdr__part_name(diag204_get_info_type(), part_hdr, lpar_name);
|
||||
lpar_name[DIAG204_LPAR_NAME_LEN] = 0;
|
||||
lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
|
||||
if (IS_ERR(lpar_dir))
|
||||
return lpar_dir;
|
||||
cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return cpus_dir;
|
||||
cpu_info = part_hdr + part_hdr__size(diag204_get_info_type());
|
||||
for (i = 0; i < part_hdr__rcpus(diag204_get_info_type(), part_hdr); i++) {
|
||||
int rc;
|
||||
|
||||
rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
cpu_info += cpu_info__size(diag204_get_info_type());
|
||||
}
|
||||
return cpu_info;
|
||||
}
|
||||
|
||||
static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
|
||||
{
|
||||
struct dentry *cpu_dir;
|
||||
char buffer[TMP_SIZE];
|
||||
void *rc;
|
||||
|
||||
snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_get_info_type(),
|
||||
cpu_info));
|
||||
cpu_dir = hypfs_mkdir(cpus_dir, buffer);
|
||||
if (IS_ERR(cpu_dir))
|
||||
return PTR_ERR(cpu_dir);
|
||||
rc = hypfs_create_u64(cpu_dir, "mgmtime",
|
||||
phys_cpu__mgm_time(diag204_get_info_type(), cpu_info));
|
||||
if (IS_ERR(rc))
|
||||
return PTR_ERR(rc);
|
||||
diag224_idx2name(phys_cpu__ctidx(diag204_get_info_type(), cpu_info), buffer);
|
||||
rc = hypfs_create_str(cpu_dir, "type", buffer);
|
||||
return PTR_ERR_OR_ZERO(rc);
|
||||
}
|
||||
|
||||
static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
|
||||
{
|
||||
int i;
|
||||
void *cpu_info;
|
||||
struct dentry *cpus_dir;
|
||||
|
||||
cpus_dir = hypfs_mkdir(parent_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return cpus_dir;
|
||||
cpu_info = phys_hdr + phys_hdr__size(diag204_get_info_type());
|
||||
for (i = 0; i < phys_hdr__cpus(diag204_get_info_type(), phys_hdr); i++) {
|
||||
int rc;
|
||||
|
||||
rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
|
||||
if (rc)
|
||||
return ERR_PTR(rc);
|
||||
cpu_info += phys_cpu__size(diag204_get_info_type());
|
||||
}
|
||||
return cpu_info;
|
||||
}
|
||||
|
||||
int hypfs_diag_create_files(struct dentry *root)
|
||||
{
|
||||
struct dentry *systems_dir, *hyp_dir;
|
||||
void *time_hdr, *part_hdr;
|
||||
void *buffer, *ptr;
|
||||
int i, rc, pages;
|
||||
|
||||
buffer = diag204_get_buffer(diag204_get_info_type(), &pages);
|
||||
if (IS_ERR(buffer))
|
||||
return PTR_ERR(buffer);
|
||||
rc = diag204_store(buffer, pages);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
systems_dir = hypfs_mkdir(root, "systems");
|
||||
if (IS_ERR(systems_dir)) {
|
||||
rc = PTR_ERR(systems_dir);
|
||||
goto err_out;
|
||||
}
|
||||
time_hdr = (struct x_info_blk_hdr *)buffer;
|
||||
part_hdr = time_hdr + info_blk_hdr__size(diag204_get_info_type());
|
||||
for (i = 0; i < info_blk_hdr__npar(diag204_get_info_type(), time_hdr); i++) {
|
||||
part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
|
||||
if (IS_ERR(part_hdr)) {
|
||||
rc = PTR_ERR(part_hdr);
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
if (info_blk_hdr__flags(diag204_get_info_type(), time_hdr) &
|
||||
DIAG204_LPAR_PHYS_FLG) {
|
||||
ptr = hypfs_create_phys_files(root, part_hdr);
|
||||
if (IS_ERR(ptr)) {
|
||||
rc = PTR_ERR(ptr);
|
||||
goto err_out;
|
||||
}
|
||||
}
|
||||
hyp_dir = hypfs_mkdir(root, "hyp");
|
||||
if (IS_ERR(hyp_dir)) {
|
||||
rc = PTR_ERR(hyp_dir);
|
||||
goto err_out;
|
||||
}
|
||||
ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
|
||||
if (IS_ERR(ptr)) {
|
||||
rc = PTR_ERR(ptr);
|
||||
goto err_out;
|
||||
}
|
||||
rc = 0;
|
||||
|
||||
err_out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Diagnose 224 functions */
|
||||
|
||||
static int diag224_idx2name(int index, char *name)
|
||||
{
|
||||
memcpy(name, diag224_cpu_names + ((index + 1) * DIAG204_CPU_NAME_LEN),
|
||||
DIAG204_CPU_NAME_LEN);
|
||||
name[DIAG204_CPU_NAME_LEN] = 0;
|
||||
strim(name);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int diag224_get_name_table(void)
|
||||
{
|
||||
/* memory must be below 2GB */
|
||||
diag224_cpu_names = (char *)__get_free_page(GFP_KERNEL | GFP_DMA);
|
||||
if (!diag224_cpu_names)
|
||||
return -ENOMEM;
|
||||
if (diag224(diag224_cpu_names)) {
|
||||
free_page((unsigned long)diag224_cpu_names);
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
EBCASC(diag224_cpu_names + 16, (*diag224_cpu_names + 1) * 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void diag224_delete_name_table(void)
|
||||
{
|
||||
free_page((unsigned long)diag224_cpu_names);
|
||||
}
|
||||
|
||||
int __init __hypfs_diag_fs_init(void)
|
||||
{
|
||||
if (MACHINE_IS_LPAR)
|
||||
return diag224_get_name_table();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __hypfs_diag_fs_exit(void)
|
||||
{
|
||||
diag224_delete_name_table();
|
||||
}
|
@ -14,47 +14,15 @@
|
||||
#include <asm/diag.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/timex.h>
|
||||
#include "hypfs_vm.h"
|
||||
#include "hypfs.h"
|
||||
|
||||
#define NAME_LEN 8
|
||||
#define DBFS_D2FC_HDR_VERSION 0
|
||||
|
||||
static char local_guest[] = " ";
|
||||
static char all_guests[] = "* ";
|
||||
static char *all_groups = all_guests;
|
||||
static char *guest_query;
|
||||
|
||||
struct diag2fc_data {
|
||||
__u32 version;
|
||||
__u32 flags;
|
||||
__u64 used_cpu;
|
||||
__u64 el_time;
|
||||
__u64 mem_min_kb;
|
||||
__u64 mem_max_kb;
|
||||
__u64 mem_share_kb;
|
||||
__u64 mem_used_kb;
|
||||
__u32 pcpus;
|
||||
__u32 lcpus;
|
||||
__u32 vcpus;
|
||||
__u32 ocpus;
|
||||
__u32 cpu_max;
|
||||
__u32 cpu_shares;
|
||||
__u32 cpu_use_samp;
|
||||
__u32 cpu_delay_samp;
|
||||
__u32 page_wait_samp;
|
||||
__u32 idle_samp;
|
||||
__u32 other_samp;
|
||||
__u32 total_samp;
|
||||
char guest_name[NAME_LEN];
|
||||
};
|
||||
|
||||
struct diag2fc_parm_list {
|
||||
char userid[NAME_LEN];
|
||||
char aci_grp[NAME_LEN];
|
||||
__u64 addr;
|
||||
__u32 size;
|
||||
__u32 fmt;
|
||||
};
|
||||
char *diag2fc_guest_query;
|
||||
|
||||
static int diag2fc(int size, char* query, void *addr)
|
||||
{
|
||||
@ -62,10 +30,10 @@ static int diag2fc(int size, char* query, void *addr)
|
||||
unsigned long rc;
|
||||
struct diag2fc_parm_list parm_list;
|
||||
|
||||
memcpy(parm_list.userid, query, NAME_LEN);
|
||||
ASCEBC(parm_list.userid, NAME_LEN);
|
||||
memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
|
||||
ASCEBC(parm_list.aci_grp, NAME_LEN);
|
||||
memcpy(parm_list.userid, query, DIAG2FC_NAME_LEN);
|
||||
ASCEBC(parm_list.userid, DIAG2FC_NAME_LEN);
|
||||
memcpy(parm_list.aci_grp, all_groups, DIAG2FC_NAME_LEN);
|
||||
ASCEBC(parm_list.aci_grp, DIAG2FC_NAME_LEN);
|
||||
parm_list.addr = (unsigned long)addr;
|
||||
parm_list.size = size;
|
||||
parm_list.fmt = 0x02;
|
||||
@ -87,7 +55,7 @@ static int diag2fc(int size, char* query, void *addr)
|
||||
/*
|
||||
* Allocate buffer for "query" and store diag 2fc at "offset"
|
||||
*/
|
||||
static void *diag2fc_store(char *query, unsigned int *count, int offset)
|
||||
void *diag2fc_store(char *query, unsigned int *count, int offset)
|
||||
{
|
||||
void *data;
|
||||
int size;
|
||||
@ -108,132 +76,11 @@ static void *diag2fc_store(char *query, unsigned int *count, int offset)
|
||||
return data;
|
||||
}
|
||||
|
||||
static void diag2fc_free(const void *data)
|
||||
void diag2fc_free(const void *data)
|
||||
{
|
||||
vfree(data);
|
||||
}
|
||||
|
||||
#define ATTRIBUTE(dir, name, member) \
|
||||
do { \
|
||||
void *rc; \
|
||||
rc = hypfs_create_u64(dir, name, member); \
|
||||
if (IS_ERR(rc)) \
|
||||
return PTR_ERR(rc); \
|
||||
} while(0)
|
||||
|
||||
static int hypfs_vm_create_guest(struct dentry *systems_dir,
|
||||
struct diag2fc_data *data)
|
||||
{
|
||||
char guest_name[NAME_LEN + 1] = {};
|
||||
struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
|
||||
int dedicated_flag, capped_value;
|
||||
|
||||
capped_value = (data->flags & 0x00000006) >> 1;
|
||||
dedicated_flag = (data->flags & 0x00000008) >> 3;
|
||||
|
||||
/* guest dir */
|
||||
memcpy(guest_name, data->guest_name, NAME_LEN);
|
||||
EBCASC(guest_name, NAME_LEN);
|
||||
strim(guest_name);
|
||||
guest_dir = hypfs_mkdir(systems_dir, guest_name);
|
||||
if (IS_ERR(guest_dir))
|
||||
return PTR_ERR(guest_dir);
|
||||
ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
|
||||
|
||||
/* logical cpu information */
|
||||
cpus_dir = hypfs_mkdir(guest_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return PTR_ERR(cpus_dir);
|
||||
ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
|
||||
ATTRIBUTE(cpus_dir, "capped", capped_value);
|
||||
ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
|
||||
ATTRIBUTE(cpus_dir, "count", data->vcpus);
|
||||
/*
|
||||
* Note: The "weight_min" attribute got the wrong name.
|
||||
* The value represents the number of non-stopped (operating)
|
||||
* CPUS.
|
||||
*/
|
||||
ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
|
||||
ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
|
||||
ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
|
||||
|
||||
/* memory information */
|
||||
mem_dir = hypfs_mkdir(guest_dir, "mem");
|
||||
if (IS_ERR(mem_dir))
|
||||
return PTR_ERR(mem_dir);
|
||||
ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
|
||||
ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
|
||||
ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
|
||||
ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
|
||||
|
||||
/* samples */
|
||||
samples_dir = hypfs_mkdir(guest_dir, "samples");
|
||||
if (IS_ERR(samples_dir))
|
||||
return PTR_ERR(samples_dir);
|
||||
ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
|
||||
ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
|
||||
ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
|
||||
ATTRIBUTE(samples_dir, "idle", data->idle_samp);
|
||||
ATTRIBUTE(samples_dir, "other", data->other_samp);
|
||||
ATTRIBUTE(samples_dir, "total", data->total_samp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hypfs_vm_create_files(struct dentry *root)
|
||||
{
|
||||
struct dentry *dir, *file;
|
||||
struct diag2fc_data *data;
|
||||
unsigned int count = 0;
|
||||
int rc, i;
|
||||
|
||||
data = diag2fc_store(guest_query, &count, 0);
|
||||
if (IS_ERR(data))
|
||||
return PTR_ERR(data);
|
||||
|
||||
/* Hypervisor Info */
|
||||
dir = hypfs_mkdir(root, "hyp");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
|
||||
if (IS_ERR(file)) {
|
||||
rc = PTR_ERR(file);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
/* physical cpus */
|
||||
dir = hypfs_mkdir(root, "cpus");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
file = hypfs_create_u64(dir, "count", data->lcpus);
|
||||
if (IS_ERR(file)) {
|
||||
rc = PTR_ERR(file);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
/* guests */
|
||||
dir = hypfs_mkdir(root, "systems");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
rc = hypfs_vm_create_guest(dir, &(data[i]));
|
||||
if (rc)
|
||||
goto failed;
|
||||
}
|
||||
diag2fc_free(data);
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
diag2fc_free(data);
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct dbfs_d2fc_hdr {
|
||||
u64 len; /* Length of d2fc buffer without header */
|
||||
u16 version; /* Version of header */
|
||||
@ -252,7 +99,7 @@ static int dbfs_diag2fc_create(void **data, void **data_free_ptr, size_t *size)
|
||||
struct dbfs_d2fc *d2fc;
|
||||
unsigned int count;
|
||||
|
||||
d2fc = diag2fc_store(guest_query, &count, sizeof(d2fc->hdr));
|
||||
d2fc = diag2fc_store(diag2fc_guest_query, &count, sizeof(d2fc->hdr));
|
||||
if (IS_ERR(d2fc))
|
||||
return PTR_ERR(d2fc);
|
||||
store_tod_clock_ext(&d2fc->hdr.tod_ext);
|
||||
@ -277,9 +124,9 @@ int hypfs_vm_init(void)
|
||||
if (!MACHINE_IS_VM)
|
||||
return 0;
|
||||
if (diag2fc(0, all_guests, NULL) > 0)
|
||||
guest_query = all_guests;
|
||||
diag2fc_guest_query = all_guests;
|
||||
else if (diag2fc(0, local_guest, NULL) > 0)
|
||||
guest_query = local_guest;
|
||||
diag2fc_guest_query = local_guest;
|
||||
else
|
||||
return -EACCES;
|
||||
hypfs_dbfs_create_file(&dbfs_file_2fc);
|
||||
|
arch/s390/hypfs/hypfs_vm.h (new file, 50 lines)
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Hypervisor filesystem for Linux on s390. z/VM implementation.
 *
 * Copyright IBM Corp. 2006
 * Author(s): Michael Holzheu <holzheu@de.ibm.com>
 */

#ifndef _S390_HYPFS_VM_H_
#define _S390_HYPFS_VM_H_

#define DIAG2FC_NAME_LEN 8

struct diag2fc_data {
	__u32 version;
	__u32 flags;
	__u64 used_cpu;
	__u64 el_time;
	__u64 mem_min_kb;
	__u64 mem_max_kb;
	__u64 mem_share_kb;
	__u64 mem_used_kb;
	__u32 pcpus;
	__u32 lcpus;
	__u32 vcpus;
	__u32 ocpus;
	__u32 cpu_max;
	__u32 cpu_shares;
	__u32 cpu_use_samp;
	__u32 cpu_delay_samp;
	__u32 page_wait_samp;
	__u32 idle_samp;
	__u32 other_samp;
	__u32 total_samp;
	char guest_name[DIAG2FC_NAME_LEN];
};

struct diag2fc_parm_list {
	char userid[DIAG2FC_NAME_LEN];
	char aci_grp[DIAG2FC_NAME_LEN];
	__u64 addr;
	__u32 size;
	__u32 fmt;
};

void *diag2fc_store(char *query, unsigned int *count, int offset);
void diag2fc_free(const void *data);
extern char *diag2fc_guest_query;

#endif /* _S390_HYPFS_VM_H_ */
arch/s390/hypfs/hypfs_vm_fs.c (new file, 139 lines)
@@ -0,0 +1,139 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Hypervisor filesystem for Linux on s390. z/VM implementation.
|
||||
*
|
||||
* Copyright IBM Corp. 2006
|
||||
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/extable.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/timex.h>
|
||||
#include "hypfs_vm.h"
|
||||
#include "hypfs.h"
|
||||
|
||||
#define ATTRIBUTE(dir, name, member) \
|
||||
do { \
|
||||
void *rc; \
|
||||
rc = hypfs_create_u64(dir, name, member); \
|
||||
if (IS_ERR(rc)) \
|
||||
return PTR_ERR(rc); \
|
||||
} while (0)
|
||||
|
||||
static int hypfs_vm_create_guest(struct dentry *systems_dir,
|
||||
struct diag2fc_data *data)
|
||||
{
|
||||
char guest_name[DIAG2FC_NAME_LEN + 1] = {};
|
||||
struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir;
|
||||
int dedicated_flag, capped_value;
|
||||
|
||||
capped_value = (data->flags & 0x00000006) >> 1;
|
||||
dedicated_flag = (data->flags & 0x00000008) >> 3;
|
||||
|
||||
/* guest dir */
|
||||
memcpy(guest_name, data->guest_name, DIAG2FC_NAME_LEN);
|
||||
EBCASC(guest_name, DIAG2FC_NAME_LEN);
|
||||
strim(guest_name);
|
||||
guest_dir = hypfs_mkdir(systems_dir, guest_name);
|
||||
if (IS_ERR(guest_dir))
|
||||
return PTR_ERR(guest_dir);
|
||||
ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
|
||||
|
||||
/* logical cpu information */
|
||||
cpus_dir = hypfs_mkdir(guest_dir, "cpus");
|
||||
if (IS_ERR(cpus_dir))
|
||||
return PTR_ERR(cpus_dir);
|
||||
ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
|
||||
ATTRIBUTE(cpus_dir, "capped", capped_value);
|
||||
ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
|
||||
ATTRIBUTE(cpus_dir, "count", data->vcpus);
|
||||
/*
|
||||
* Note: The "weight_min" attribute got the wrong name.
|
||||
* The value represents the number of non-stopped (operating)
|
||||
* CPUS.
|
||||
*/
|
||||
ATTRIBUTE(cpus_dir, "weight_min", data->ocpus);
|
||||
ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
|
||||
ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
|
||||
|
||||
/* memory information */
|
||||
mem_dir = hypfs_mkdir(guest_dir, "mem");
|
||||
if (IS_ERR(mem_dir))
|
||||
return PTR_ERR(mem_dir);
|
||||
ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
|
||||
ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
|
||||
ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
|
||||
ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
|
||||
|
||||
/* samples */
|
||||
samples_dir = hypfs_mkdir(guest_dir, "samples");
|
||||
if (IS_ERR(samples_dir))
|
||||
return PTR_ERR(samples_dir);
|
||||
ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
|
||||
ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
|
||||
ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
|
||||
ATTRIBUTE(samples_dir, "idle", data->idle_samp);
|
||||
ATTRIBUTE(samples_dir, "other", data->other_samp);
|
||||
ATTRIBUTE(samples_dir, "total", data->total_samp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hypfs_vm_create_files(struct dentry *root)
|
||||
{
|
||||
struct dentry *dir, *file;
|
||||
struct diag2fc_data *data;
|
||||
unsigned int count = 0;
|
||||
int rc, i;
|
||||
|
||||
data = diag2fc_store(diag2fc_guest_query, &count, 0);
|
||||
if (IS_ERR(data))
|
||||
return PTR_ERR(data);
|
||||
|
||||
/* Hypervisor Info */
|
||||
dir = hypfs_mkdir(root, "hyp");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
|
||||
if (IS_ERR(file)) {
|
||||
rc = PTR_ERR(file);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
/* physical cpus */
|
||||
dir = hypfs_mkdir(root, "cpus");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
file = hypfs_create_u64(dir, "count", data->lcpus);
|
||||
if (IS_ERR(file)) {
|
||||
rc = PTR_ERR(file);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
/* guests */
|
||||
dir = hypfs_mkdir(root, "systems");
|
||||
if (IS_ERR(dir)) {
|
||||
rc = PTR_ERR(dir);
|
||||
goto failed;
|
||||
}
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
rc = hypfs_vm_create_guest(dir, &data[i]);
|
||||
if (rc)
|
||||
goto failed;
|
||||
}
|
||||
diag2fc_free(data);
|
||||
return 0;
|
||||
|
||||
failed:
|
||||
diag2fc_free(data);
|
||||
return rc;
|
||||
}
|
@ -460,45 +460,18 @@ static const struct super_operations hypfs_s_ops = {
|
||||
.show_options = hypfs_show_options,
|
||||
};
|
||||
|
||||
static int __init hypfs_init(void)
|
||||
int __init __hypfs_fs_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
hypfs_dbfs_init();
|
||||
|
||||
if (hypfs_diag_init()) {
|
||||
rc = -ENODATA;
|
||||
goto fail_dbfs_exit;
|
||||
}
|
||||
if (hypfs_vm_init()) {
|
||||
rc = -ENODATA;
|
||||
goto fail_hypfs_diag_exit;
|
||||
}
|
||||
hypfs_sprp_init();
|
||||
if (hypfs_diag0c_init()) {
|
||||
rc = -ENODATA;
|
||||
goto fail_hypfs_sprp_exit;
|
||||
}
|
||||
rc = sysfs_create_mount_point(hypervisor_kobj, "s390");
|
||||
if (rc)
|
||||
goto fail_hypfs_diag0c_exit;
|
||||
return rc;
|
||||
rc = register_filesystem(&hypfs_type);
|
||||
if (rc)
|
||||
goto fail_filesystem;
|
||||
goto fail;
|
||||
return 0;
|
||||
|
||||
fail_filesystem:
|
||||
fail:
|
||||
sysfs_remove_mount_point(hypervisor_kobj, "s390");
|
||||
fail_hypfs_diag0c_exit:
|
||||
hypfs_diag0c_exit();
|
||||
fail_hypfs_sprp_exit:
|
||||
hypfs_sprp_exit();
|
||||
hypfs_vm_exit();
|
||||
fail_hypfs_diag_exit:
|
||||
hypfs_diag_exit();
|
||||
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
|
||||
fail_dbfs_exit:
|
||||
hypfs_dbfs_exit();
|
||||
return rc;
|
||||
}
|
||||
device_initcall(hypfs_init)
|
||||
|
@ -5,6 +5,5 @@ generated-y += syscall_table.h
|
||||
generated-y += unistd_nr.h
|
||||
|
||||
generic-y += asm-offsets.h
|
||||
generic-y += export.h
|
||||
generic-y += kvm_types.h
|
||||
generic-y += mcs_spinlock.h
|
||||
|
@ -222,7 +222,7 @@ static inline debug_entry_t *debug_text_event(debug_info_t *id, int level,
|
||||
|
||||
/*
|
||||
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
|
||||
* stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
|
||||
* stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
|
||||
*/
|
||||
extern debug_entry_t *
|
||||
__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
|
||||
@ -350,7 +350,7 @@ static inline debug_entry_t *debug_text_exception(debug_info_t *id, int level,
|
||||
|
||||
/*
|
||||
* IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are
|
||||
* stored in the s390dbf. See Documentation/s390/s390dbf.rst for more details!
|
||||
* stored in the s390dbf. See Documentation/arch/s390/s390dbf.rst for more details!
|
||||
*/
|
||||
extern debug_entry_t *
|
||||
__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
|
||||
|
@ -36,6 +36,7 @@ enum diag_stat_enum {
|
||||
DIAG_STAT_X304,
|
||||
DIAG_STAT_X308,
|
||||
DIAG_STAT_X318,
|
||||
DIAG_STAT_X320,
|
||||
DIAG_STAT_X500,
|
||||
NR_DIAG_STAT
|
||||
};
|
||||
@ -108,6 +109,8 @@ enum diag204_sc {
|
||||
DIAG204_SUBC_STIB7 = 7
|
||||
};
|
||||
|
||||
#define DIAG204_SUBCODE_MASK 0xffff
|
||||
|
||||
/* The two available diag 204 data formats */
|
||||
enum diag204_format {
|
||||
DIAG204_INFO_SIMPLE = 0,
|
||||
|
@ -54,6 +54,23 @@ static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
struct fgraph_ret_regs {
|
||||
unsigned long gpr2;
|
||||
unsigned long fp;
|
||||
};
|
||||
|
||||
static __always_inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
|
||||
{
|
||||
return ret_regs->gpr2;
|
||||
}
|
||||
|
||||
static __always_inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
|
||||
{
|
||||
return ret_regs->fp;
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
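The struct and the two accessors above are what the generic function-graph code uses to pull the traced function's return value and frame pointer out of the area that return_to_handler fills in. A minimal, hypothetical consumer (not the actual tracer core) might look like:

/* Hypothetical helper: report the value the traced function returned.
 * On s390 the return value lives in %r2, saved here as gpr2. */
static void example_report_retval(struct fgraph_ret_regs *ret_regs)
{
	unsigned long retval = fgraph_ret_regs_return_value(ret_regs);
	unsigned long fp = fgraph_ret_regs_frame_pointer(ret_regs);

	pr_debug("fgraph exit: retval=%lx fp=%lx\n", retval, fp);
}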
||||
|
||||
static __always_inline unsigned long
|
||||
ftrace_regs_get_instruction_pointer(const struct ftrace_regs *fregs)
|
||||
{
|
||||
|
@ -35,7 +35,7 @@ static __always_inline void kfence_split_mapping(void)
|
||||
|
||||
static inline bool kfence_protect_page(unsigned long addr, bool protect)
|
||||
{
|
||||
__kernel_map_pages(virt_to_page(addr), 1, !protect);
|
||||
__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1028,6 +1028,9 @@ static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
|
||||
|
||||
extern char sie_exit;
|
||||
|
||||
bool kvm_s390_pv_is_protected(struct kvm *kvm);
|
||||
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu);
|
||||
|
||||
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
|
||||
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
|
||||
|
||||
|
@ -4,6 +4,9 @@
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#define MEMCPY_REAL_SIZE PAGE_SIZE
|
||||
#define MEMCPY_REAL_MASK PAGE_MASK
|
||||
|
||||
struct iov_iter;
|
||||
|
||||
extern unsigned long __memcpy_real_area;
|
||||
|
@@ -191,8 +191,16 @@ int arch_make_page_accessible(struct page *page);
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))

#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
static inline void *pfn_to_virt(unsigned long pfn)
{
	return __va(pfn_to_phys(pfn));
}

static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return phys_to_pfn(__pa(kaddr));
}

#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)

#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
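Because virt_to_pfn() now takes a const void * and pfn_to_virt() returns a void *, type mismatches that the old macros silently accepted are flagged by the compiler. A small illustrative sketch, not part of the patch (the helper name is made up):

#include <asm/page.h>

/* Hypothetical round-trip showing the typed interface; 'buf' is any
 * directly mapped kernel address the caller already owns. */
static void *example_round_trip(void *buf)
{
	unsigned long pfn = virt_to_pfn(buf);	/* const void * in, pfn out */

	return pfn_to_virt(pfn);		/* void * out, no cast needed */
}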
arch/s390/include/asm/pfault.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2023
 */
#ifndef _ASM_S390_PFAULT_H
#define _ASM_S390_PFAULT_H

#include <linux/errno.h>

int __pfault_init(void);
void __pfault_fini(void);

static inline int pfault_init(void)
{
	if (IS_ENABLED(CONFIG_PFAULT))
		return __pfault_init();
	return -EOPNOTSUPP;
}

static inline void pfault_fini(void)
{
	if (IS_ENABLED(CONFIG_PFAULT))
		__pfault_fini();
}

#endif /* _ASM_S390_PFAULT_H */
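With the CONFIG_PFAULT handling folded into these inline wrappers, callers no longer need their own #ifdef blocks. A hedged sketch of a caller, not taken from the patch (the function name is illustrative):

#include <linux/printk.h>
#include <asm/pfault.h>

/* Hypothetical CPU bring-up hook: pfault is optional, so a failure to
 * initialize it is reported but not treated as fatal. */
static void example_enable_pfault(void)
{
	if (pfault_init())
		pr_info("pfault handshake not available on this system\n");
}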
@ -89,8 +89,6 @@ extern unsigned long __bootdata_preserved(VMALLOC_END);
|
||||
extern struct page *__bootdata_preserved(vmemmap);
|
||||
extern unsigned long __bootdata_preserved(vmemmap_size);
|
||||
|
||||
#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
|
||||
|
||||
extern unsigned long __bootdata_preserved(MODULES_VADDR);
|
||||
extern unsigned long __bootdata_preserved(MODULES_END);
|
||||
#define MODULES_VADDR MODULES_VADDR
|
||||
|
@ -86,6 +86,7 @@ struct sclp_info {
|
||||
unsigned char has_kss : 1;
|
||||
unsigned char has_gisaf : 1;
|
||||
unsigned char has_diag318 : 1;
|
||||
unsigned char has_diag320 : 1;
|
||||
unsigned char has_sipl : 1;
|
||||
unsigned char has_sipl_eckd : 1;
|
||||
unsigned char has_dirq : 1;
|
||||
|
@ -74,6 +74,7 @@ extern unsigned int zlib_dfltcc_support;
|
||||
|
||||
extern int noexec_disabled;
|
||||
extern unsigned long ident_map_size;
|
||||
extern unsigned long max_mappable;
|
||||
|
||||
/* The Write Back bit position in the physaddr is given by the SLPC PCI */
|
||||
extern unsigned long mio_wb_bit_mask;
|
||||
@ -117,14 +118,6 @@ extern unsigned int console_irq;
|
||||
#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
|
||||
#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
|
||||
|
||||
#ifdef CONFIG_PFAULT
|
||||
extern int pfault_init(void);
|
||||
extern void pfault_fini(void);
|
||||
#else /* CONFIG_PFAULT */
|
||||
#define pfault_init() ({-1;})
|
||||
#define pfault_fini() do { } while (0)
|
||||
#endif /* CONFIG_PFAULT */
|
||||
|
||||
#ifdef CONFIG_VMCP
|
||||
void vmcp_cma_reserve(void);
|
||||
#else
|
||||
|
@ -463,6 +463,7 @@ static inline int is_prot_virt_host(void)
|
||||
return prot_virt_host;
|
||||
}
|
||||
|
||||
int uv_pin_shared(unsigned long paddr);
|
||||
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
|
||||
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
|
||||
int uv_destroy_owned_page(unsigned long paddr);
|
||||
@ -475,6 +476,11 @@ void setup_uv(void);
|
||||
#define is_prot_virt_host() 0
|
||||
static inline void setup_uv(void) {}
|
||||
|
||||
static inline int uv_pin_shared(unsigned long paddr)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int uv_destroy_owned_page(unsigned long paddr)
|
||||
{
|
||||
return 0;
|
||||
|
@ -26,7 +26,7 @@
|
||||
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
|
||||
#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have always 136 bytes */
|
||||
#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */
|
||||
#define MAXEP11AESKEYBLOBSIZE 320 /* max EP11 AES key blob size */
|
||||
#define MAXEP11AESKEYBLOBSIZE 336 /* max EP11 AES key blob size */
|
||||
|
||||
/* Minimum size of a key blob */
|
||||
#define MINKEYBLOBSIZE SECKEYBLOBSIZE
|
||||
|
@ -37,9 +37,9 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
|
||||
obj-y := head64.o traps.o time.o process.o earlypgm.o early.o setup.o idle.o vtime.o
|
||||
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
|
||||
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o cpufeature.o
|
||||
obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
|
||||
obj-y += sysinfo.o lgr.o os_info.o
|
||||
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
|
||||
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
|
||||
obj-y += entry.o reipl.o kdebugfs.o alternative.o
|
||||
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
|
||||
obj-y += smp.o text_amode31.o stacktrace.o abs_lowcore.o
|
||||
|
||||
@ -63,12 +63,13 @@ obj-$(CONFIG_RETHOOK) += rethook.o
|
||||
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
|
||||
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
|
||||
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
|
||||
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
|
||||
obj-$(CONFIG_UPROBES) += uprobes.o
|
||||
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
||||
|
||||
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
|
||||
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
|
||||
|
||||
obj-$(CONFIG_CERT_STORE) += cert_store.o
|
||||
obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
|
||||
|
||||
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/purgatory.h>
|
||||
#include <linux/pgtable.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/gmap.h>
|
||||
#include <asm/stacktrace.h>
|
||||
@ -177,5 +178,13 @@ int main(void)
|
||||
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
|
||||
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
|
||||
DEFINE(MAX_COMMAND_LINE_SIZE, PARMAREA + offsetof(struct parmarea, max_command_line_size));
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
/* function graph return value tracing */
|
||||
OFFSET(__FGRAPH_RET_GPR2, fgraph_ret_regs, gpr2);
|
||||
OFFSET(__FGRAPH_RET_FP, fgraph_ret_regs, fp);
|
||||
DEFINE(__FGRAPH_RET_SIZE, sizeof(struct fgraph_ret_regs));
|
||||
#endif
|
||||
OFFSET(__FTRACE_REGS_PT_REGS, ftrace_regs, regs);
|
||||
DEFINE(__FTRACE_REGS_SIZE, sizeof(struct ftrace_regs));
|
||||
return 0;
|
||||
}
|
||||
|
arch/s390/kernel/cert_store.c (new file, 811 lines)
@@ -0,0 +1,811 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* DIAG 0x320 support and certificate store handling
|
||||
*
|
||||
* Copyright IBM Corp. 2023
|
||||
* Author(s): Anastasia Eskova <anastasia.eskova@ibm.com>
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/key-type.h>
|
||||
#include <linux/key.h>
|
||||
#include <linux/keyctl.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <crypto/sha2.h>
|
||||
#include <keys/user-type.h>
|
||||
#include <asm/debug.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
#define DIAG_MAX_RETRIES 10
|
||||
|
||||
#define VCE_FLAGS_VALID_MASK 0x80
|
||||
|
||||
#define ISM_LEN_DWORDS 4
|
||||
#define VCSSB_LEN_BYTES 128
|
||||
#define VCSSB_LEN_NO_CERTS 4
|
||||
#define VCB_LEN_NO_CERTS 64
|
||||
#define VC_NAME_LEN_BYTES 64
|
||||
|
||||
#define CERT_STORE_KEY_TYPE_NAME "cert_store_key"
|
||||
#define CERT_STORE_KEYRING_NAME "cert_store"
|
||||
|
||||
static debug_info_t *cert_store_dbf;
|
||||
static debug_info_t *cert_store_hexdump;
|
||||
|
||||
#define pr_dbf_msg(fmt, ...) \
|
||||
debug_sprintf_event(cert_store_dbf, 3, fmt "\n", ## __VA_ARGS__)
|
||||
|
||||
enum diag320_subcode {
|
||||
DIAG320_SUBCODES = 0,
|
||||
DIAG320_STORAGE = 1,
|
||||
DIAG320_CERT_BLOCK = 2,
|
||||
};
|
||||
|
||||
enum diag320_rc {
|
||||
DIAG320_RC_OK = 0x0001,
|
||||
DIAG320_RC_CS_NOMATCH = 0x0306,
|
||||
};
|
||||
|
||||
/* Verification Certificates Store Support Block (VCSSB). */
|
||||
struct vcssb {
|
||||
u32 vcssb_length;
|
||||
u8 pad_0x04[3];
|
||||
u8 version;
|
||||
u8 pad_0x08[8];
|
||||
u32 cs_token;
|
||||
u8 pad_0x14[12];
|
||||
u16 total_vc_index_count;
|
||||
u16 max_vc_index_count;
|
||||
u8 pad_0x24[28];
|
||||
u32 max_vce_length;
|
||||
u32 max_vcxe_length;
|
||||
u8 pad_0x48[8];
|
||||
u32 max_single_vcb_length;
|
||||
u32 total_vcb_length;
|
||||
u32 max_single_vcxb_length;
|
||||
u32 total_vcxb_length;
|
||||
u8 pad_0x60[32];
|
||||
} __packed __aligned(8);
|
||||
|
||||
/* Verification Certificate Entry (VCE) Header. */
|
||||
struct vce_header {
|
||||
u32 vce_length;
|
||||
u8 flags;
|
||||
u8 key_type;
|
||||
u16 vc_index;
|
||||
u8 vc_name[VC_NAME_LEN_BYTES]; /* EBCDIC */
|
||||
u8 vc_format;
|
||||
u8 pad_0x49;
|
||||
u16 key_id_length;
|
||||
u8 pad_0x4c;
|
||||
u8 vc_hash_type;
|
||||
u16 vc_hash_length;
|
||||
u8 pad_0x50[4];
|
||||
u32 vc_length;
|
||||
u8 pad_0x58[8];
|
||||
u16 vc_hash_offset;
|
||||
u16 vc_offset;
|
||||
u8 pad_0x64[28];
|
||||
} __packed __aligned(4);
|
||||
|
||||
/* Verification Certificate Block (VCB) Header. */
|
||||
struct vcb_header {
|
||||
u32 vcb_input_length;
|
||||
u8 pad_0x04[4];
|
||||
u16 first_vc_index;
|
||||
u16 last_vc_index;
|
||||
u32 pad_0x0c;
|
||||
u32 cs_token;
|
||||
u8 pad_0x14[12];
|
||||
u32 vcb_output_length;
|
||||
u8 pad_0x24[3];
|
||||
u8 version;
|
||||
u16 stored_vc_count;
|
||||
u16 remaining_vc_count;
|
||||
u8 pad_0x2c[20];
|
||||
} __packed __aligned(4);
|
||||
|
||||
/* Verification Certificate Block (VCB). */
|
||||
struct vcb {
|
||||
struct vcb_header vcb_hdr;
|
||||
u8 vcb_buf[];
|
||||
} __packed __aligned(4);
|
||||
|
||||
/* Verification Certificate Entry (VCE). */
|
||||
struct vce {
|
||||
struct vce_header vce_hdr;
|
||||
u8 cert_data_buf[];
|
||||
} __packed __aligned(4);
|
||||
|
||||
static void cert_store_key_describe(const struct key *key, struct seq_file *m)
|
||||
{
|
||||
char ascii[VC_NAME_LEN_BYTES + 1];
|
||||
|
||||
/*
|
||||
* First 64 bytes of the key description is key name in EBCDIC CP 500.
|
||||
* Convert it to ASCII for displaying in /proc/keys.
|
||||
*/
|
||||
strscpy(ascii, key->description, sizeof(ascii));
|
||||
EBCASC_500(ascii, VC_NAME_LEN_BYTES);
|
||||
seq_puts(m, ascii);
|
||||
|
||||
seq_puts(m, &key->description[VC_NAME_LEN_BYTES]);
|
||||
if (key_is_positive(key))
|
||||
seq_printf(m, ": %u", key->datalen);
|
||||
}
|
||||
|
||||
/*
|
||||
* Certificate store key type takes over properties of
|
||||
* user key but cannot be updated.
|
||||
*/
|
||||
static struct key_type key_type_cert_store_key = {
|
||||
.name = CERT_STORE_KEY_TYPE_NAME,
|
||||
.preparse = user_preparse,
|
||||
.free_preparse = user_free_preparse,
|
||||
.instantiate = generic_key_instantiate,
|
||||
.revoke = user_revoke,
|
||||
.destroy = user_destroy,
|
||||
.describe = cert_store_key_describe,
|
||||
.read = user_read,
|
||||
};
|
||||
|
||||
/* Logging functions. */
|
||||
static void pr_dbf_vcb(const struct vcb *b)
|
||||
{
|
||||
pr_dbf_msg("VCB Header:");
|
||||
pr_dbf_msg("vcb_input_length: %d", b->vcb_hdr.vcb_input_length);
|
||||
pr_dbf_msg("first_vc_index: %d", b->vcb_hdr.first_vc_index);
|
||||
pr_dbf_msg("last_vc_index: %d", b->vcb_hdr.last_vc_index);
|
||||
pr_dbf_msg("cs_token: %d", b->vcb_hdr.cs_token);
|
||||
pr_dbf_msg("vcb_output_length: %d", b->vcb_hdr.vcb_output_length);
|
||||
pr_dbf_msg("version: %d", b->vcb_hdr.version);
|
||||
pr_dbf_msg("stored_vc_count: %d", b->vcb_hdr.stored_vc_count);
|
||||
pr_dbf_msg("remaining_vc_count: %d", b->vcb_hdr.remaining_vc_count);
|
||||
}
|
||||
|
||||
static void pr_dbf_vce(const struct vce *e)
|
||||
{
|
||||
unsigned char vc_name[VC_NAME_LEN_BYTES + 1];
|
||||
char log_string[VC_NAME_LEN_BYTES + 40];
|
||||
|
||||
pr_dbf_msg("VCE Header:");
|
||||
pr_dbf_msg("vce_hdr.vce_length: %d", e->vce_hdr.vce_length);
|
||||
pr_dbf_msg("vce_hdr.flags: %d", e->vce_hdr.flags);
|
||||
pr_dbf_msg("vce_hdr.key_type: %d", e->vce_hdr.key_type);
|
||||
pr_dbf_msg("vce_hdr.vc_index: %d", e->vce_hdr.vc_index);
|
||||
pr_dbf_msg("vce_hdr.vc_format: %d", e->vce_hdr.vc_format);
|
||||
pr_dbf_msg("vce_hdr.key_id_length: %d", e->vce_hdr.key_id_length);
|
||||
pr_dbf_msg("vce_hdr.vc_hash_type: %d", e->vce_hdr.vc_hash_type);
|
||||
pr_dbf_msg("vce_hdr.vc_hash_length: %d", e->vce_hdr.vc_hash_length);
|
||||
pr_dbf_msg("vce_hdr.vc_hash_offset: %d", e->vce_hdr.vc_hash_offset);
|
||||
pr_dbf_msg("vce_hdr.vc_length: %d", e->vce_hdr.vc_length);
|
||||
pr_dbf_msg("vce_hdr.vc_offset: %d", e->vce_hdr.vc_offset);
|
||||
|
||||
/* Certificate name in ASCII. */
|
||||
memcpy(vc_name, e->vce_hdr.vc_name, VC_NAME_LEN_BYTES);
|
||||
EBCASC_500(vc_name, VC_NAME_LEN_BYTES);
|
||||
vc_name[VC_NAME_LEN_BYTES] = '\0';
|
||||
|
||||
snprintf(log_string, sizeof(log_string),
|
||||
"index: %d vce_hdr.vc_name (ASCII): %s",
|
||||
e->vce_hdr.vc_index, vc_name);
|
||||
debug_text_event(cert_store_hexdump, 3, log_string);
|
||||
|
||||
/* Certificate data. */
|
||||
debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data start");
|
||||
debug_event(cert_store_hexdump, 3, (u8 *)e->cert_data_buf, 128);
|
||||
debug_text_event(cert_store_hexdump, 3, "VCE: Certificate data end");
|
||||
debug_event(cert_store_hexdump, 3,
|
||||
(u8 *)e->cert_data_buf + e->vce_hdr.vce_length - 128, 128);
|
||||
}
|
||||
|
||||
static void pr_dbf_vcssb(const struct vcssb *s)
|
||||
{
|
||||
debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode1");
|
||||
debug_event(cert_store_hexdump, 3, (u8 *)s, VCSSB_LEN_BYTES);
|
||||
|
||||
pr_dbf_msg("VCSSB:");
|
||||
pr_dbf_msg("vcssb_length: %u", s->vcssb_length);
|
||||
pr_dbf_msg("version: %u", s->version);
|
||||
pr_dbf_msg("cs_token: %u", s->cs_token);
|
||||
pr_dbf_msg("total_vc_index_count: %u", s->total_vc_index_count);
|
||||
pr_dbf_msg("max_vc_index_count: %u", s->max_vc_index_count);
|
||||
pr_dbf_msg("max_vce_length: %u", s->max_vce_length);
|
||||
pr_dbf_msg("max_vcxe_length: %u", s->max_vce_length);
|
||||
pr_dbf_msg("max_single_vcb_length: %u", s->max_single_vcb_length);
|
||||
pr_dbf_msg("total_vcb_length: %u", s->total_vcb_length);
|
||||
pr_dbf_msg("max_single_vcxb_length: %u", s->max_single_vcxb_length);
|
||||
pr_dbf_msg("total_vcxb_length: %u", s->total_vcxb_length);
|
||||
}
|
||||
|
||||
static int __diag320(unsigned long subcode, void *addr)
|
||||
{
|
||||
union register_pair rp = { .even = (unsigned long)addr, };
|
||||
|
||||
asm volatile(
|
||||
" diag %[rp],%[subcode],0x320\n"
|
||||
"0: nopr %%r7\n"
|
||||
EX_TABLE(0b, 0b)
|
||||
: [rp] "+d" (rp.pair)
|
||||
: [subcode] "d" (subcode)
|
||||
: "cc", "memory");
|
||||
|
||||
return rp.odd;
|
||||
}
|
||||
|
||||
static int diag320(unsigned long subcode, void *addr)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X320);
|
||||
|
||||
return __diag320(subcode, addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate SHA256 hash of the VCE certificate and compare it to hash stored in
|
||||
* VCE. Return -EINVAL if hashes don't match.
|
||||
*/
|
||||
static int check_certificate_hash(const struct vce *vce)
|
||||
{
|
||||
u8 hash[SHA256_DIGEST_SIZE];
|
||||
u16 vc_hash_length;
|
||||
u8 *vce_hash;
|
||||
|
||||
vce_hash = (u8 *)vce + vce->vce_hdr.vc_hash_offset;
|
||||
vc_hash_length = vce->vce_hdr.vc_hash_length;
|
||||
sha256((u8 *)vce + vce->vce_hdr.vc_offset, vce->vce_hdr.vc_length, hash);
|
||||
if (memcmp(vce_hash, hash, vc_hash_length) == 0)
|
||||
return 0;
|
||||
|
||||
pr_dbf_msg("SHA256 hash of received certificate does not match");
|
||||
debug_text_event(cert_store_hexdump, 3, "VCE hash:");
|
||||
debug_event(cert_store_hexdump, 3, vce_hash, SHA256_DIGEST_SIZE);
|
||||
debug_text_event(cert_store_hexdump, 3, "Calculated hash:");
|
||||
debug_event(cert_store_hexdump, 3, hash, SHA256_DIGEST_SIZE);
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int check_certificate_valid(const struct vce *vce)
|
||||
{
|
||||
if (!(vce->vce_hdr.flags & VCE_FLAGS_VALID_MASK)) {
|
||||
pr_dbf_msg("Certificate entry is invalid");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (vce->vce_hdr.vc_format != 1) {
|
||||
pr_dbf_msg("Certificate format is not supported");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (vce->vce_hdr.vc_hash_type != 1) {
|
||||
pr_dbf_msg("Hash type is not supported");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return check_certificate_hash(vce);
|
||||
}
|
||||
|
||||
static struct key *get_user_session_keyring(void)
|
||||
{
|
||||
key_ref_t us_keyring_ref;
|
||||
|
||||
us_keyring_ref = lookup_user_key(KEY_SPEC_USER_SESSION_KEYRING,
|
||||
KEY_LOOKUP_CREATE, KEY_NEED_LINK);
|
||||
if (IS_ERR(us_keyring_ref)) {
|
||||
pr_dbf_msg("Couldn't get user session keyring: %ld",
|
||||
PTR_ERR(us_keyring_ref));
|
||||
return ERR_PTR(-ENOKEY);
|
||||
}
|
||||
key_ref_put(us_keyring_ref);
|
||||
return key_ref_to_ptr(us_keyring_ref);
|
||||
}
|
||||
|
||||
/* Invalidate all keys from cert_store keyring. */
|
||||
static int invalidate_keyring_keys(struct key *keyring)
|
||||
{
|
||||
unsigned long num_keys, key_index;
|
||||
size_t keyring_payload_len;
|
||||
key_serial_t *key_array;
|
||||
struct key *current_key;
|
||||
int rc;
|
||||
|
||||
keyring_payload_len = key_type_keyring.read(keyring, NULL, 0);
|
||||
num_keys = keyring_payload_len / sizeof(key_serial_t);
|
||||
key_array = kcalloc(num_keys, sizeof(key_serial_t), GFP_KERNEL);
|
||||
if (!key_array)
|
||||
return -ENOMEM;
|
||||
|
||||
rc = key_type_keyring.read(keyring, (char *)key_array, keyring_payload_len);
|
||||
if (rc != keyring_payload_len) {
|
||||
pr_dbf_msg("Couldn't read keyring payload");
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (key_index = 0; key_index < num_keys; key_index++) {
|
||||
current_key = key_lookup(key_array[key_index]);
|
||||
pr_dbf_msg("Invalidating key %08x", current_key->serial);
|
||||
|
||||
key_invalidate(current_key);
|
||||
key_put(current_key);
|
||||
rc = key_unlink(keyring, current_key);
|
||||
if (rc) {
|
||||
pr_dbf_msg("Couldn't unlink key %08x: %d", current_key->serial, rc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
out:
|
||||
kfree(key_array);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct key *find_cs_keyring(void)
|
||||
{
|
||||
key_ref_t cs_keyring_ref;
|
||||
struct key *cs_keyring;
|
||||
|
||||
cs_keyring_ref = keyring_search(make_key_ref(get_user_session_keyring(), true),
|
||||
&key_type_keyring, CERT_STORE_KEYRING_NAME,
|
||||
false);
|
||||
if (!IS_ERR(cs_keyring_ref)) {
|
||||
cs_keyring = key_ref_to_ptr(cs_keyring_ref);
|
||||
key_ref_put(cs_keyring_ref);
|
||||
goto found;
|
||||
}
|
||||
/* Search default locations: thread, process, session keyrings */
|
||||
cs_keyring = request_key(&key_type_keyring, CERT_STORE_KEYRING_NAME, NULL);
|
||||
if (IS_ERR(cs_keyring))
|
||||
return NULL;
|
||||
key_put(cs_keyring);
|
||||
found:
|
||||
return cs_keyring;
|
||||
}
|
||||
|
||||
static void cleanup_cs_keys(void)
|
||||
{
|
||||
struct key *cs_keyring;
|
||||
|
||||
cs_keyring = find_cs_keyring();
|
||||
if (!cs_keyring)
|
||||
return;
|
||||
|
||||
pr_dbf_msg("Found cert_store keyring. Purging...");
|
||||
/*
|
||||
* Remove cert_store_key_type in case invalidation
|
||||
* of old cert_store keys failed (= severe error).
|
||||
*/
|
||||
if (invalidate_keyring_keys(cs_keyring))
|
||||
unregister_key_type(&key_type_cert_store_key);
|
||||
|
||||
keyring_clear(cs_keyring);
|
||||
key_invalidate(cs_keyring);
|
||||
key_put(cs_keyring);
|
||||
key_unlink(get_user_session_keyring(), cs_keyring);
|
||||
}
|
||||
|
||||
static struct key *create_cs_keyring(void)
|
||||
{
|
||||
static struct key *cs_keyring;
|
||||
|
||||
/* Cleanup previous cs_keyring and all associated keys if any. */
|
||||
cleanup_cs_keys();
|
||||
cs_keyring = keyring_alloc(CERT_STORE_KEYRING_NAME, GLOBAL_ROOT_UID,
|
||||
GLOBAL_ROOT_GID, current_cred(),
|
||||
(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
|
||||
KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_SET_KEEP,
|
||||
NULL, get_user_session_keyring());
|
||||
if (IS_ERR(cs_keyring)) {
|
||||
pr_dbf_msg("Can't allocate cert_store keyring");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
pr_dbf_msg("Successfully allocated cert_store keyring: %08x", cs_keyring->serial);
|
||||
|
||||
/*
|
||||
* In case a previous clean-up ran into an
|
||||
* error and unregistered key type.
|
||||
*/
|
||||
register_key_type(&key_type_cert_store_key);
|
||||
|
||||
return cs_keyring;
|
||||
}
|
||||
|
||||
/*
|
||||
* Allocate memory and create key description in format
|
||||
* [key name in EBCDIC]:[VCE index]:[CS token].
|
||||
* Return a pointer to key description or NULL if memory
|
||||
* allocation failed. Memory should be freed by caller.
|
||||
*/
|
||||
static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
|
||||
{
|
||||
size_t len, name_len;
|
||||
u32 cs_token;
|
||||
char *desc;
|
||||
|
||||
cs_token = vcssb->cs_token;
|
||||
/* Description string contains "%64s:%04u:%08u\0". */
|
||||
name_len = sizeof(vce->vce_hdr.vc_name);
|
||||
len = name_len + 1 + 4 + 1 + 8 + 1;
|
||||
desc = kmalloc(len, GFP_KERNEL);
|
||||
if (!desc)
|
||||
return NULL;
|
||||
|
||||
memcpy(desc, vce->vce_hdr.vc_name, name_len);
|
||||
sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a key of type "cert_store_key" using the data from VCE for key
|
||||
* payload and key description. Link the key to "cert_store" keyring.
|
||||
*/
|
||||
static int create_key_from_vce(struct vcssb *vcssb, struct vce *vce,
|
||||
struct key *keyring)
|
||||
{
|
||||
key_ref_t newkey;
|
||||
char *desc;
|
||||
int rc;
|
||||
|
||||
desc = get_key_description(vcssb, vce);
|
||||
if (!desc)
|
||||
return -ENOMEM;
|
||||
|
||||
newkey = key_create_or_update(
|
||||
make_key_ref(keyring, true), CERT_STORE_KEY_TYPE_NAME,
|
||||
desc, (u8 *)vce + vce->vce_hdr.vc_offset,
|
||||
vce->vce_hdr.vc_length,
|
||||
(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ,
|
||||
KEY_ALLOC_NOT_IN_QUOTA);
|
||||
|
||||
rc = PTR_ERR_OR_ZERO(newkey);
|
||||
if (rc) {
|
||||
pr_dbf_msg("Couldn't create a key from Certificate Entry (%d)", rc);
|
||||
rc = -ENOKEY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
key_ref_put(newkey);
|
||||
out:
|
||||
kfree(desc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Get Verification Certificate Storage Size block with DIAG320 subcode2. */
|
||||
static int get_vcssb(struct vcssb *vcssb)
|
||||
{
|
||||
int diag320_rc;
|
||||
|
||||
memset(vcssb, 0, sizeof(*vcssb));
|
||||
vcssb->vcssb_length = VCSSB_LEN_BYTES;
|
||||
diag320_rc = diag320(DIAG320_STORAGE, vcssb);
|
||||
pr_dbf_vcssb(vcssb);
|
||||
|
||||
if (diag320_rc != DIAG320_RC_OK) {
|
||||
pr_dbf_msg("Diag 320 Subcode 1 returned bad RC: %04x", diag320_rc);
|
||||
return -EIO;
|
||||
}
|
||||
if (vcssb->vcssb_length == VCSSB_LEN_NO_CERTS) {
|
||||
pr_dbf_msg("No certificates available for current configuration");
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u32 get_4k_mult_vcb_size(struct vcssb *vcssb)
|
||||
{
|
||||
return round_up(vcssb->max_single_vcb_length, PAGE_SIZE);
|
||||
}
|
||||
|
||||
/* Fill input fields of single-entry VCB that will be read by LPAR. */
|
||||
static void fill_vcb_input(struct vcssb *vcssb, struct vcb *vcb, u16 index)
|
||||
{
|
||||
memset(vcb, 0, sizeof(*vcb));
|
||||
vcb->vcb_hdr.vcb_input_length = get_4k_mult_vcb_size(vcssb);
|
||||
vcb->vcb_hdr.cs_token = vcssb->cs_token;
|
||||
|
||||
/* Request single entry. */
|
||||
vcb->vcb_hdr.first_vc_index = index;
|
||||
vcb->vcb_hdr.last_vc_index = index;
|
||||
}
|
||||
|
||||
static void extract_vce_from_sevcb(struct vcb *vcb, struct vce *vce)
|
||||
{
|
||||
struct vce *extracted_vce;
|
||||
|
||||
extracted_vce = (struct vce *)vcb->vcb_buf;
|
||||
memcpy(vce, vcb->vcb_buf, extracted_vce->vce_hdr.vce_length);
|
||||
pr_dbf_vce(vce);
|
||||
}
|
||||
|
||||
static int get_sevcb(struct vcssb *vcssb, u16 index, struct vcb *vcb)
|
||||
{
|
||||
int rc, diag320_rc;
|
||||
|
||||
fill_vcb_input(vcssb, vcb, index);
|
||||
|
||||
diag320_rc = diag320(DIAG320_CERT_BLOCK, vcb);
|
||||
pr_dbf_msg("Diag 320 Subcode2 RC %2x", diag320_rc);
|
||||
pr_dbf_vcb(vcb);
|
||||
|
||||
switch (diag320_rc) {
|
||||
case DIAG320_RC_OK:
|
||||
rc = 0;
|
||||
if (vcb->vcb_hdr.vcb_output_length == VCB_LEN_NO_CERTS) {
|
||||
pr_dbf_msg("No certificate entry for index %u", index);
|
||||
rc = -ENOKEY;
|
||||
} else if (vcb->vcb_hdr.remaining_vc_count != 0) {
|
||||
/* Retry on insufficient space. */
|
||||
pr_dbf_msg("Couldn't get all requested certificates");
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
break;
|
||||
case DIAG320_RC_CS_NOMATCH:
|
||||
pr_dbf_msg("Certificate Store token mismatch");
|
||||
rc = -EAGAIN;
|
||||
break;
|
||||
default:
|
||||
pr_dbf_msg("Diag 320 Subcode2 returned bad rc (0x%4x)", diag320_rc);
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
 * Allocate memory for a single-entry VCB, get the VCB via a DIAG320 subcode 2
 * call, extract the VCE and create a key from its certificate.
 */
|
||||
static int create_key_from_sevcb(struct vcssb *vcssb, u16 index,
|
||||
struct key *keyring)
|
||||
{
|
||||
struct vcb *vcb;
|
||||
struct vce *vce;
|
||||
int rc;
|
||||
|
||||
rc = -ENOMEM;
|
||||
vcb = vmalloc(get_4k_mult_vcb_size(vcssb));
|
||||
vce = vmalloc(vcssb->max_single_vcb_length - sizeof(vcb->vcb_hdr));
|
||||
if (!vcb || !vce)
|
||||
goto out;
|
||||
|
||||
rc = get_sevcb(vcssb, index, vcb);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
extract_vce_from_sevcb(vcb, vce);
|
||||
rc = check_certificate_valid(vce);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
rc = create_key_from_vce(vcssb, vce, keyring);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
pr_dbf_msg("Successfully created key from Certificate Entry %d", index);
|
||||
out:
|
||||
vfree(vce);
|
||||
vfree(vcb);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request a single-entry VCB for each VCE available for the partition.
|
||||
* Create a key from it and link it to cert_store keyring. If no keys
|
||||
* could be created (i.e. VCEs were invalid) return -ENOKEY.
|
||||
*/
|
||||
static int add_certificates_to_keyring(struct vcssb *vcssb, struct key *keyring)
|
||||
{
|
||||
int rc, index, count, added;
|
||||
|
||||
count = 0;
|
||||
added = 0;
|
||||
/* Certificate Store entries indices start with 1 and have no gaps. */
|
||||
for (index = 1; index < vcssb->total_vc_index_count + 1; index++) {
|
||||
pr_dbf_msg("Creating key from VCE %u", index);
|
||||
rc = create_key_from_sevcb(vcssb, index, keyring);
|
||||
count++;
|
||||
|
||||
if (rc == -EAGAIN)
|
||||
return rc;
|
||||
|
||||
if (rc)
|
||||
pr_dbf_msg("Creating key from VCE %u failed (%d)", index, rc);
|
||||
else
|
||||
added++;
|
||||
}
|
||||
|
||||
if (added == 0) {
|
||||
pr_dbf_msg("Processed %d entries. No keys created", count);
|
||||
return -ENOKEY;
|
||||
}
|
||||
|
||||
pr_info("Added %d of %d keys to cert_store keyring", added, count);
|
||||
|
||||
/*
|
||||
* Do not allow to link more keys to certificate store keyring after all
|
||||
* the VCEs were processed.
|
||||
*/
|
||||
rc = keyring_restrict(make_key_ref(keyring, true), NULL, NULL);
|
||||
if (rc)
|
||||
pr_dbf_msg("Failed to set restriction to cert_store keyring (%d)", rc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check which DIAG320 subcodes are installed.
|
||||
* Return -ENOENT if subcodes 1 or 2 are not available.
|
||||
*/
|
||||
static int query_diag320_subcodes(void)
|
||||
{
|
||||
unsigned long ism[ISM_LEN_DWORDS];
|
||||
int rc;
|
||||
|
||||
rc = diag320(0, ism);
|
||||
if (rc != DIAG320_RC_OK) {
|
||||
pr_dbf_msg("DIAG320 subcode query returned %04x", rc);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
debug_text_event(cert_store_hexdump, 3, "DIAG320 Subcode 0");
|
||||
debug_event(cert_store_hexdump, 3, ism, sizeof(ism));
|
||||
|
||||
if (!test_bit_inv(1, ism) || !test_bit_inv(2, ism)) {
|
||||
pr_dbf_msg("Not all required DIAG320 subcodes are installed");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if Certificate Store is supported by the firmware and DIAG320 subcodes
|
||||
* 1 and 2 are installed. Create cert_store keyring and link all certificates
|
||||
* available for the current partition to it as "cert_store_key" type
|
||||
* keys. On refresh or error invalidate cert_store keyring and destroy
|
||||
* all keys of "cert_store_key" type.
|
||||
*/
|
||||
static int fill_cs_keyring(void)
|
||||
{
|
||||
struct key *cs_keyring;
|
||||
struct vcssb *vcssb;
|
||||
int rc;
|
||||
|
||||
rc = -ENOMEM;
|
||||
vcssb = kmalloc(VCSSB_LEN_BYTES, GFP_KERNEL);
|
||||
if (!vcssb)
|
||||
goto cleanup_keys;
|
||||
|
||||
rc = -ENOENT;
|
||||
if (!sclp.has_diag320) {
|
||||
pr_dbf_msg("Certificate Store is not supported");
|
||||
goto cleanup_keys;
|
||||
}
|
||||
|
||||
rc = query_diag320_subcodes();
|
||||
if (rc)
|
||||
goto cleanup_keys;
|
||||
|
||||
rc = get_vcssb(vcssb);
|
||||
if (rc)
|
||||
goto cleanup_keys;
|
||||
|
||||
rc = -ENOMEM;
|
||||
cs_keyring = create_cs_keyring();
|
||||
if (!cs_keyring)
|
||||
goto cleanup_keys;
|
||||
|
||||
rc = add_certificates_to_keyring(vcssb, cs_keyring);
|
||||
if (rc)
|
||||
goto cleanup_cs_keyring;
|
||||
|
||||
goto out;
|
||||
|
||||
cleanup_cs_keyring:
|
||||
key_put(cs_keyring);
|
||||
cleanup_keys:
|
||||
cleanup_cs_keys();
|
||||
out:
|
||||
kfree(vcssb);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static DEFINE_MUTEX(cs_refresh_lock);
|
||||
static int cs_status_val = -1;
|
||||
|
||||
static ssize_t cs_status_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
if (cs_status_val == -1)
|
||||
return sysfs_emit(buf, "uninitialized\n");
|
||||
else if (cs_status_val == 0)
|
||||
return sysfs_emit(buf, "ok\n");
|
||||
|
||||
return sysfs_emit(buf, "failed (%d)\n", cs_status_val);
|
||||
}
|
||||
|
||||
static struct kobj_attribute cs_status_attr = __ATTR_RO(cs_status);
|
||||
|
||||
static ssize_t refresh_store(struct kobject *kobj, struct kobj_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
int rc, retries;
|
||||
|
||||
pr_dbf_msg("Refresh certificate store information requested");
|
||||
rc = mutex_lock_interruptible(&cs_refresh_lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
for (retries = 0; retries < DIAG_MAX_RETRIES; retries++) {
|
||||
/* Request certificates from certificate store. */
|
||||
rc = fill_cs_keyring();
|
||||
if (rc)
|
||||
pr_dbf_msg("Failed to refresh certificate store information (%d)", rc);
|
||||
if (rc != -EAGAIN)
|
||||
break;
|
||||
}
|
||||
cs_status_val = rc;
|
||||
mutex_unlock(&cs_refresh_lock);
|
||||
|
||||
return rc ?: count;
|
||||
}
|
||||
|
||||
static struct kobj_attribute refresh_attr = __ATTR_WO(refresh);
|
||||
|
||||
static const struct attribute *cert_store_attrs[] __initconst = {
|
||||
&cs_status_attr.attr,
|
||||
&refresh_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct kobject *cert_store_kobj;
|
||||
|
||||
static int __init cert_store_init(void)
|
||||
{
|
||||
int rc = -ENOMEM;
|
||||
|
||||
cert_store_dbf = debug_register("cert_store_msg", 10, 1, 64);
|
||||
if (!cert_store_dbf)
|
||||
goto cleanup_dbf;
|
||||
|
||||
cert_store_hexdump = debug_register("cert_store_hexdump", 3, 1, 128);
|
||||
if (!cert_store_hexdump)
|
||||
goto cleanup_dbf;
|
||||
|
||||
debug_register_view(cert_store_hexdump, &debug_hex_ascii_view);
|
||||
debug_register_view(cert_store_dbf, &debug_sprintf_view);
|
||||
|
||||
/* Create directory /sys/firmware/cert_store. */
|
||||
cert_store_kobj = kobject_create_and_add("cert_store", firmware_kobj);
|
||||
if (!cert_store_kobj)
|
||||
goto cleanup_dbf;
|
||||
|
||||
rc = sysfs_create_files(cert_store_kobj, cert_store_attrs);
|
||||
if (rc)
|
||||
goto cleanup_kobj;
|
||||
|
||||
register_key_type(&key_type_cert_store_key);
|
||||
|
||||
return rc;
|
||||
|
||||
cleanup_kobj:
|
||||
kobject_put(cert_store_kobj);
|
||||
cleanup_dbf:
|
||||
debug_unregister(cert_store_dbf);
|
||||
debug_unregister(cert_store_hexdump);
|
||||
|
||||
return rc;
|
||||
}
|
||||
device_initcall(cert_store_init);
|
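The init code above exposes /sys/firmware/cert_store with a write-only refresh trigger and a read-only cs_status attribute. A hedged user-space example of driving that interface (paths taken from the code above, everything else illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical refresh trigger for /sys/firmware/cert_store (needs root). */
int main(void)
{
	char status[32] = { 0 };
	int fd;

	fd = open("/sys/firmware/cert_store/refresh", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) < 0)
		return 1;
	close(fd);

	fd = open("/sys/firmware/cert_store/cs_status", O_RDONLY);
	if (fd < 0 || read(fd, status, sizeof(status) - 1) < 0)
		return 1;
	close(fd);
	printf("cert_store status: %s", status);
	return 0;
}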
@ -11,6 +11,7 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <asm/asm-extable.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/trace/diag.h>
|
||||
@ -50,6 +51,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
|
||||
[DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
|
||||
[DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
|
||||
[DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" },
|
||||
[DIAG_STAT_X320] = { .code = 0x320, .name = "Certificate Store" },
|
||||
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
|
||||
};
|
||||
|
||||
@ -167,8 +169,29 @@ static inline int __diag204(unsigned long *subcode, unsigned long size, void *ad
|
||||
return rp.odd;
|
||||
}
|
||||
|
||||
/**
|
||||
* diag204() - Issue diagnose 204 call.
|
||||
* @subcode: Subcode of diagnose 204 to be executed.
|
||||
* @size: Size of area in pages which @area points to, if given.
|
||||
* @addr: Vmalloc'ed memory area where the result is written to.
|
||||
*
|
||||
* Execute diagnose 204 with the given subcode and write the result to the
|
||||
* memory area specified with @addr. For subcodes which do not write a
|
||||
* result to memory both @size and @addr must be zero. If @addr is
|
||||
* specified it must be page aligned and must have been allocated with
|
||||
* vmalloc(). Conversion to real / physical addresses will be handled by
|
||||
* this function if required.
|
||||
*/
|
||||
int diag204(unsigned long subcode, unsigned long size, void *addr)
|
||||
{
|
||||
if (addr) {
|
||||
if (WARN_ON_ONCE(!is_vmalloc_addr(addr)))
|
||||
return -1;
|
||||
if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)addr, PAGE_SIZE)))
|
||||
return -1;
|
||||
}
|
||||
if ((subcode & DIAG204_SUBCODE_MASK) == DIAG204_SUBC_STIB4)
|
||||
addr = (void *)pfn_to_phys(vmalloc_to_pfn(addr));
|
||||
diag_stat_inc(DIAG_STAT_X204);
|
||||
size = __diag204(&subcode, size, addr);
|
||||
if (subcode)
|
||||
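The kerneldoc above pins down the calling convention: any result buffer must come from vmalloc() and be page aligned, the size is given in pages, and subcodes that return nothing are called with size and addr of zero. A hedged usage sketch along those lines (the helper name and the assumption that a negative return signals failure are mine, not the patch's):

#include <linux/vmalloc.h>
#include <asm/diag.h>

/* Hypothetical helper: run a diag 204 subcode that writes 'pages' pages
 * of data and hand the vmalloc'ed buffer back to the caller. */
static void *example_diag204_query(unsigned long subcode, unsigned long pages)
{
	void *buf = vmalloc(pages * PAGE_SIZE);	/* page aligned by construction */

	if (!buf)
		return NULL;
	if (diag204(subcode, pages, buf) < 0) {
		vfree(buf);
		return NULL;
	}
	return buf;
}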
@ -200,7 +223,7 @@ int diag210(struct diag210 *addr)
|
||||
EXPORT_SYMBOL(diag210);
|
||||
|
||||
/*
|
||||
* Diagnose 210: Get information about a virtual device
|
||||
* Diagnose 8C: Access 3270 Display Device Information
|
||||
*/
|
||||
int diag8c(struct diag8c *addr, struct ccw_dev_id *devno)
|
||||
{
|
||||
|
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * ECBDIC -> ASCII, ASCII -> ECBDIC,
 * EBCDIC -> ASCII, ASCII -> EBCDIC,
 * upper to lower case (EBCDIC) conversion tables.
 *
 * S390 version
@ -8,6 +8,7 @@
|
||||
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/asm-extable.h>
|
||||
@ -26,7 +27,6 @@
|
||||
#include <asm/vx-insn.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/nmi.h>
|
||||
#include <asm/export.h>
|
||||
#include <asm/nospec-insn.h>
|
||||
|
||||
_LPP_OFFSET = __LC_LPP
|
||||
|
@ -266,7 +266,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
|
||||
struct kobj_attribute *attr, \
|
||||
const char *buf, size_t len) \
|
||||
{ \
|
||||
strncpy(_value, buf, sizeof(_value) - 1); \
|
||||
strscpy(_value, buf, sizeof(_value)); \
|
||||
strim(_value); \
|
||||
return len; \
|
||||
} \
|
||||
@ -557,15 +557,12 @@ static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
|
||||
__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
|
||||
|
||||
static struct attribute *ipl_fcp_attrs[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_device_attr.attr,
|
||||
&sys_ipl_fcp_wwpn_attr.attr,
|
||||
&sys_ipl_fcp_lun_attr.attr,
|
||||
&sys_ipl_fcp_bootprog_attr.attr,
|
||||
&sys_ipl_fcp_br_lba_attr.attr,
|
||||
&sys_ipl_ccw_loadparm_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -575,14 +572,11 @@ static struct attribute_group ipl_fcp_attr_group = {
|
||||
};
|
||||
|
||||
static struct attribute *ipl_nvme_attrs[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_nvme_fid_attr.attr,
|
||||
&sys_ipl_nvme_nsid_attr.attr,
|
||||
&sys_ipl_nvme_bootprog_attr.attr,
|
||||
&sys_ipl_nvme_br_lba_attr.attr,
|
||||
&sys_ipl_ccw_loadparm_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -592,13 +586,10 @@ static struct attribute_group ipl_nvme_attr_group = {
|
||||
};
|
||||
|
||||
static struct attribute *ipl_eckd_attrs[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_eckd_bootprog_attr.attr,
|
||||
&sys_ipl_eckd_br_chr_attr.attr,
|
||||
&sys_ipl_ccw_loadparm_attr.attr,
|
||||
&sys_ipl_device_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -610,21 +601,15 @@ static struct attribute_group ipl_eckd_attr_group = {
|
||||
/* CCW ipl device attributes */
|
||||
|
||||
static struct attribute *ipl_ccw_attrs_vm[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_device_attr.attr,
|
||||
&sys_ipl_ccw_loadparm_attr.attr,
|
||||
&sys_ipl_vm_parm_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute *ipl_ccw_attrs_lpar[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_device_attr.attr,
|
||||
&sys_ipl_ccw_loadparm_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@ -636,15 +621,15 @@ static struct attribute_group ipl_ccw_attr_group_lpar = {
|
||||
.attrs = ipl_ccw_attrs_lpar
|
||||
};
|
||||
|
||||
/* UNKNOWN ipl device attributes */
|
||||
|
||||
static struct attribute *ipl_unknown_attrs[] = {
|
||||
static struct attribute *ipl_common_attrs[] = {
|
||||
&sys_ipl_type_attr.attr,
|
||||
&sys_ipl_secure_attr.attr,
|
||||
&sys_ipl_has_secure_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group ipl_unknown_attr_group = {
|
||||
.attrs = ipl_unknown_attrs,
|
||||
static struct attribute_group ipl_common_attr_group = {
|
||||
.attrs = ipl_common_attrs,
|
||||
};
|
||||
|
||||
static struct kset *ipl_kset;
|
||||
@ -668,6 +653,9 @@ static int __init ipl_init(void)
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_common_attr_group);
|
||||
if (rc)
|
||||
goto out;
|
||||
switch (ipl_info.type) {
|
||||
case IPL_TYPE_CCW:
|
||||
if (MACHINE_IS_VM)
|
||||
@ -689,8 +677,6 @@ static int __init ipl_init(void)
|
||||
rc = sysfs_create_group(&ipl_kset->kobj, &ipl_nvme_attr_group);
|
||||
break;
|
||||
default:
|
||||
rc = sysfs_create_group(&ipl_kset->kobj,
|
||||
&ipl_unknown_attr_group);
|
||||
break;
|
||||
}
|
||||
out:
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/debug_locks.h>
|
||||
#include <asm/pfault.h>
|
||||
#include <asm/cio.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/smp.h>
|
||||
|
@ -188,7 +188,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
|
||||
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
|
||||
buf.mem = data->memsz;
|
||||
|
||||
ptr = (void *)ipl_cert_list_addr;
|
||||
ptr = __va(ipl_cert_list_addr);
|
||||
end = ptr + ipl_cert_list_size;
|
||||
ncerts = 0;
|
||||
while (ptr < end) {
|
||||
@ -200,7 +200,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
|
||||
|
||||
addr = data->memsz + data->report->size;
|
||||
addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
|
||||
ptr = (void *)ipl_cert_list_addr;
|
||||
ptr = __va(ipl_cert_list_addr);
|
||||
while (ptr < end) {
|
||||
len = *(unsigned int *)ptr;
|
||||
ptr += sizeof(len);
|
||||
|
@@ -9,15 +9,20 @@
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/export.h>

#define STACK_FRAME_SIZE_PTREGS		(STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS			(STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS		(STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW		(STACK_PTREGS + __PT_PSW)

#define STACK_FRAME_SIZE_FREGS		(STACK_FRAME_OVERHEAD + __FTRACE_REGS_SIZE)
#define STACK_FREGS			(STACK_FRAME_OVERHEAD)
#define STACK_FREGS_PTREGS		(STACK_FRAME_OVERHEAD + __FTRACE_REGS_PT_REGS)
#define STACK_FREGS_PTREGS_GPRS		(STACK_FREGS_PTREGS + __PT_GPRS)
#define STACK_FREGS_PTREGS_PSW		(STACK_FREGS_PTREGS + __PT_PSW)
#define STACK_FREGS_PTREGS_ORIG_GPR2	(STACK_FREGS_PTREGS + __PT_ORIG_GPR2)
#define STACK_FREGS_PTREGS_FLAGS	(STACK_FREGS_PTREGS + __PT_FLAGS)

#define STACK_FRAME_SIZE		(STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS			(STACK_FRAME_OVERHEAD)
#define STACK_PTREGS_GPRS		(STACK_PTREGS + __PT_GPRS)
#define STACK_PTREGS_PSW		(STACK_PTREGS + __PT_PSW)
#define STACK_PTREGS_ORIG_GPR2		(STACK_PTREGS + __PT_ORIG_GPR2)
#define STACK_PTREGS_FLAGS		(STACK_PTREGS + __PT_FLAGS)
/* packed stack: allocate just enough for r14, r15 and backchain */
#define TRACED_FUNC_FRAME_SIZE	24
@ -53,23 +58,23 @@ SYM_CODE_END(ftrace_stub_direct_tramp)
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r0,(__SF_GPRS+8*8)(%r15)
|
||||
stg %r15,(__SF_GPRS+9*8)(%r15)
|
||||
# allocate pt_regs and stack frame for ftrace_trace_function
|
||||
aghi %r15,-STACK_FRAME_SIZE
|
||||
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
|
||||
xc STACK_PTREGS_ORIG_GPR2(8,%r15),STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
# allocate ftrace_regs and stack frame for ftrace_trace_function
|
||||
aghi %r15,-STACK_FRAME_SIZE_FREGS
|
||||
stg %r1,(STACK_FREGS_PTREGS_GPRS+15*8)(%r15)
|
||||
xc STACK_FREGS_PTREGS_ORIG_GPR2(8,%r15),STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
|
||||
|
||||
.if \allregs == 1
|
||||
stg %r14,(STACK_PTREGS_PSW)(%r15)
|
||||
mvghi STACK_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
|
||||
stg %r14,(STACK_FREGS_PTREGS_PSW)(%r15)
|
||||
mvghi STACK_FREGS_PTREGS_FLAGS(%r15),_PIF_FTRACE_FULL_REGS
|
||||
.else
|
||||
xc STACK_PTREGS_FLAGS(8,%r15),STACK_PTREGS_FLAGS(%r15)
|
||||
xc STACK_FREGS_PTREGS_FLAGS(8,%r15),STACK_FREGS_PTREGS_FLAGS(%r15)
|
||||
.endif
|
||||
|
||||
lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
|
||||
aghi %r1,-TRACED_FUNC_FRAME_SIZE
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
|
||||
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
stg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
|
||||
stmg %r2,%r14,(STACK_FREGS_PTREGS_GPRS+2*8)(%r15)
|
||||
.endm
|
||||
|
||||
SYM_CODE_START(ftrace_regs_caller)
|
||||
@ -96,30 +101,30 @@ SYM_CODE_START(ftrace_common)
|
||||
lg %r1,0(%r1)
|
||||
#endif
|
||||
lgr %r3,%r14
|
||||
la %r5,STACK_PTREGS(%r15)
|
||||
la %r5,STACK_FREGS(%r15)
|
||||
BASR_EX %r14,%r1
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
# The j instruction gets runtime patched to a nop instruction.
|
||||
# See ftrace_enable_ftrace_graph_caller.
|
||||
SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
|
||||
j .Lftrace_graph_caller_end
|
||||
lmg %r2,%r3,(STACK_PTREGS_GPRS+14*8)(%r15)
|
||||
lg %r4,(STACK_PTREGS_PSW+8)(%r15)
|
||||
lmg %r2,%r3,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
|
||||
lg %r4,(STACK_FREGS_PTREGS_PSW+8)(%r15)
|
||||
brasl %r14,prepare_ftrace_return
|
||||
stg %r2,(STACK_PTREGS_GPRS+14*8)(%r15)
|
||||
stg %r2,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
|
||||
.Lftrace_graph_caller_end:
|
||||
#endif
|
||||
lg %r0,(STACK_PTREGS_PSW+8)(%r15)
|
||||
lg %r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
ltg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
ltg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
|
||||
locgrz %r1,%r0
|
||||
#else
|
||||
lg %r1,STACK_PTREGS_ORIG_GPR2(%r15)
|
||||
lg %r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)
|
||||
ltgr %r1,%r1
|
||||
jnz 0f
|
||||
lgr %r1,%r0
|
||||
#endif
|
||||
0: lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
0: lmg %r2,%r15,(STACK_FREGS_PTREGS_GPRS+2*8)(%r15)
|
||||
BR_EX %r1
|
||||
SYM_CODE_END(ftrace_common)
|
||||
|
||||
@ -128,10 +133,14 @@ SYM_CODE_END(ftrace_common)
|
||||
SYM_FUNC_START(return_to_handler)
|
||||
stmg %r2,%r5,32(%r15)
|
||||
lgr %r1,%r15
|
||||
aghi %r15,-STACK_FRAME_OVERHEAD
|
||||
aghi %r15,-(STACK_FRAME_OVERHEAD+__FGRAPH_RET_SIZE)
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
la %r3,STACK_FRAME_OVERHEAD(%r15)
|
||||
stg %r1,__FGRAPH_RET_FP(%r3)
|
||||
stg %r2,__FGRAPH_RET_GPR2(%r3)
|
||||
lgr %r2,%r3
|
||||
brasl %r14,ftrace_return_to_handler
|
||||
aghi %r15,STACK_FRAME_OVERHEAD
|
||||
aghi %r15,STACK_FRAME_OVERHEAD+__FGRAPH_RET_SIZE
|
||||
lgr %r14,%r2
|
||||
lmg %r2,%r5,32(%r15)
|
||||
BR_EX %r14
|
||||
@ -160,11 +169,11 @@ SYM_CODE_END(ftrace_shared_hotpatch_trampoline_exrl)
|
||||
|
||||
SYM_CODE_START(arch_rethook_trampoline)
|
||||
stg %r14,(__SF_GPRS+8*8)(%r15)
|
||||
lay %r15,-STACK_FRAME_SIZE(%r15)
|
||||
lay %r15,-STACK_FRAME_SIZE_PTREGS(%r15)
|
||||
stmg %r0,%r14,STACK_PTREGS_GPRS(%r15)
|
||||
|
||||
# store original stack pointer in backchain and pt_regs
|
||||
lay %r7,STACK_FRAME_SIZE(%r15)
|
||||
lay %r7,STACK_FRAME_SIZE_PTREGS(%r15)
|
||||
stg %r7,__SF_BACKCHAIN(%r15)
|
||||
stg %r7,STACK_PTREGS_GPRS+(15*8)(%r15)
|
||||
|
||||
|
@@ -146,6 +146,7 @@ static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

int __bootdata(noexec_disabled);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);

@@ -874,7 +875,7 @@ static void __init log_component_list(void)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	ptr = __va(early_ipl_comp_list_addr);
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
@@ -37,6 +37,7 @@
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
@@ -252,8 +253,9 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];
	struct lowcore *lc, *abs_lc;

	lc = lowcore_ptr[cpu];
	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
@@ -266,7 +268,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
@@ -606,8 +610,8 @@ void smp_ctl_set_clear_bit(int cr, int bit, bool set)
	ctlreg = (ctlreg & parms.andval) | parms.orval;
	abs_lc->cregs_save_area[cr] = ctlreg;
	put_abs_lowcore(abs_lc);
	spin_unlock(&ctl_lock);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
	spin_unlock(&ctl_lock);
}
EXPORT_SYMBOL(smp_ctl_set_clear_bit);

@@ -927,12 +931,18 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	spin_lock(&ctl_lock);
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	spin_unlock(&ctl_lock);
	return 0;
}
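Editorial note: the __cpu_up() hunk above widens the ctl_lock critical section so the control register template cannot change while the new CPU copies it. The following is a userspace analogy only (pthreads standing in for the kernel's spinlock and lowcore; all names here are made up for illustration), not the kernel code itself.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* "cregs_template" plays the role of the global control register contents
 * that pcpu_prepare_secondary() copies into the new CPU's lowcore. */
static unsigned long cregs_template[16];
static pthread_mutex_t ctl_lock = PTHREAD_MUTEX_INITIALIZER;

static void *secondary(void *arg)
{
	unsigned long my_cregs[16];

	/* The caller already holds ctl_lock here, so the template cannot be
	 * modified by a concurrent "set/clear bit" while it is being copied. */
	memcpy(my_cregs, cregs_template, sizeof(my_cregs));
	printf("secondary sees creg0=%lx\n", my_cregs[0]);
	return arg;
}

int main(void)
{
	pthread_t tid;

	cregs_template[0] = 0x1234;
	pthread_mutex_lock(&ctl_lock);      /* like spin_lock(&ctl_lock) in __cpu_up() */
	pthread_create(&tid, NULL, secondary, NULL);
	pthread_join(tid, NULL);            /* like waiting for cpu_online(cpu) */
	pthread_mutex_unlock(&ctl_lock);    /* only now may others change the template */
	return 0;
}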
@@ -317,7 +317,9 @@ static void fill_diag(struct sthyi_sctns *sctns)
	if (pages <= 0)
		return;

	diag204_buf = vmalloc(array_size(pages, PAGE_SIZE));
	diag204_buf = __vmalloc_node(array_size(pages, PAGE_SIZE),
				     PAGE_SIZE, GFP_KERNEL, NUMA_NO_NODE,
				     __builtin_return_address(0));
	if (!diag204_buf)
		return;
@@ -88,7 +88,7 @@ fail:
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
@@ -100,6 +100,7 @@ static int uv_pin_shared(unsigned long paddr)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

/*
 * Requests the Ultravisor to destroy a guest page and make it
@ -270,18 +270,6 @@ static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
|
||||
return vcpu->arch.pv.handle;
|
||||
}
|
||||
|
||||
static inline bool kvm_s390_pv_is_protected(struct kvm *kvm)
|
||||
{
|
||||
lockdep_assert_held(&kvm->lock);
|
||||
return !!kvm_s390_pv_get_handle(kvm);
|
||||
}
|
||||
|
||||
static inline bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
lockdep_assert_held(&vcpu->mutex);
|
||||
return !!kvm_s390_pv_cpu_get_handle(vcpu);
|
||||
}
|
||||
|
||||
/* implemented in interrupt.c */
|
||||
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
|
||||
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
|
||||
|
@ -18,6 +18,20 @@
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include "kvm-s390.h"
|
||||
|
||||
bool kvm_s390_pv_is_protected(struct kvm *kvm)
|
||||
{
|
||||
lockdep_assert_held(&kvm->lock);
|
||||
return !!kvm_s390_pv_get_handle(kvm);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);
|
||||
|
||||
bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
lockdep_assert_held(&vcpu->mutex);
|
||||
return !!kvm_s390_pv_cpu_get_handle(vcpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
|
||||
|
||||
/**
|
||||
* struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
|
||||
* be destroyed
|
||||
|
@@ -5,8 +5,8 @@
 * Copyright IBM Corp. 2012
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

	GEN_BR_THUNK %r14

@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/export.h>

	.section .noinstr.text, "ax"
@@ -10,3 +10,4 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
obj-$(CONFIG_PFAULT) += pfault.o
@@ -90,7 +90,7 @@ static long cmm_alloc_pages(long nr, long *counter,
		} else
			free_page((unsigned long) npa);
	}
	diag10_range(virt_to_pfn(addr), 1);
	diag10_range(virt_to_pfn((void *)addr), 1);
	pa->pages[pa->index++] = addr;
	(*counter)++;
	spin_unlock(&cmm_lock);
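Editorial note: the cmm.c hunk above follows the conversion of virt_to_pfn() from a define to a typed static inline. A minimal userspace sketch of why that catches the old call at compile time; the names and constants below are illustrative, not the kernel's exact definitions.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Old style: a macro silently accepts any integer or pointer expression. */
#define virt_to_pfn_macro(kaddr) ((unsigned long)(kaddr) >> PAGE_SHIFT)

/* New style: a static inline insists on a pointer argument, so passing a
 * plain unsigned long (as the old cmm.c call did) draws a compiler warning
 * and forces the explicit (void *) cast seen in the hunk. */
static inline unsigned long virt_to_pfn_inline(const void *kaddr)
{
	return (unsigned long)kaddr >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long addr = 0x12345000UL;

	printf("%lu\n", virt_to_pfn_macro(addr));          /* accepted either way */
	printf("%lu\n", virt_to_pfn_inline((void *)addr)); /* cast is now explicit */
	return 0;
}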
@@ -297,7 +297,7 @@ static int pt_dump_init(void)
	address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
	address_markers[ABS_LOWCORE_END_NR].start_address = __abs_lowcore + ABS_LOWCORE_MAP_SIZE;
	address_markers[MEMCPY_REAL_NR].start_address = __memcpy_real_area;
	address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + PAGE_SIZE;
	address_markers[MEMCPY_REAL_END_NR].start_address = __memcpy_real_area + MEMCPY_REAL_SIZE;
	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
	address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
@@ -640,10 +640,13 @@ void segment_warning(int rc, char *seg_name)
		pr_err("There is not enough memory to load or query "
		       "DCSS %s\n", seg_name);
		break;
	case -ERANGE:
		pr_err("DCSS %s exceeds the kernel mapping range (%lu) "
		       "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS);
	case -ERANGE: {
		struct range mhp_range = arch_get_mappable_range();

		pr_err("DCSS %s exceeds the kernel mapping range (%llu) "
		       "and cannot be loaded\n", seg_name, mhp_range.end + 1);
		break;
	}
	default:
		break;
	}
@ -43,8 +43,6 @@
|
||||
#include "../kernel/entry.h"
|
||||
|
||||
#define __FAIL_ADDR_MASK -4096L
|
||||
#define __SUBCODE_MASK 0x0600
|
||||
#define __PF_RES_FIELD 0x8000000000000000ULL
|
||||
|
||||
/*
|
||||
* Allocate private vm_fault_reason from top. Please make sure it won't
|
||||
@ -583,232 +581,6 @@ void do_dat_exception(struct pt_regs *regs)
|
||||
}
|
||||
NOKPROBE_SYMBOL(do_dat_exception);
|
||||
|
||||
#ifdef CONFIG_PFAULT
|
||||
/*
|
||||
* 'pfault' pseudo page faults routines.
|
||||
*/
|
||||
static int pfault_disable;
|
||||
|
||||
static int __init nopfault(char *str)
|
||||
{
|
||||
pfault_disable = 1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("nopfault", nopfault);
|
||||
|
||||
struct pfault_refbk {
|
||||
u16 refdiagc;
|
||||
u16 reffcode;
|
||||
u16 refdwlen;
|
||||
u16 refversn;
|
||||
u64 refgaddr;
|
||||
u64 refselmk;
|
||||
u64 refcmpmk;
|
||||
u64 reserved;
|
||||
} __attribute__ ((packed, aligned(8)));
|
||||
|
||||
static struct pfault_refbk pfault_init_refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 0,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
.refgaddr = __LC_LPP,
|
||||
.refselmk = 1ULL << 48,
|
||||
.refcmpmk = 1ULL << 48,
|
||||
.reserved = __PF_RES_FIELD
|
||||
};
|
||||
|
||||
int pfault_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (pfault_disable)
|
||||
return -1;
|
||||
diag_stat_inc(DIAG_STAT_X258);
|
||||
asm volatile(
|
||||
" diag %1,%0,0x258\n"
|
||||
"0: j 2f\n"
|
||||
"1: la %0,8\n"
|
||||
"2:\n"
|
||||
EX_TABLE(0b,1b)
|
||||
: "=d" (rc)
|
||||
: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct pfault_refbk pfault_fini_refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 1,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
};
|
||||
|
||||
void pfault_fini(void)
|
||||
{
|
||||
|
||||
if (pfault_disable)
|
||||
return;
|
||||
diag_stat_inc(DIAG_STAT_X258);
|
||||
asm volatile(
|
||||
" diag %0,0,0x258\n"
|
||||
"0: nopr %%r7\n"
|
||||
EX_TABLE(0b,0b)
|
||||
: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(pfault_lock);
|
||||
static LIST_HEAD(pfault_list);
|
||||
|
||||
#define PF_COMPLETE 0x0080
|
||||
|
||||
/*
|
||||
* The mechanism of our pfault code: if Linux is running as guest, runs a user
|
||||
* space process and the user space process accesses a page that the host has
|
||||
* paged out we get a pfault interrupt.
|
||||
*
|
||||
* This allows us, within the guest, to schedule a different process. Without
|
||||
* this mechanism the host would have to suspend the whole virtual cpu until
|
||||
* the page has been paged in.
|
||||
*
|
||||
* So when we get such an interrupt then we set the state of the current task
|
||||
* to uninterruptible and also set the need_resched flag. Both happens within
|
||||
* interrupt context(!). If we later on want to return to user space we
|
||||
* recognize the need_resched flag and then call schedule(). It's not very
|
||||
* obvious how this works...
|
||||
*
|
||||
* Of course we have a lot of additional fun with the completion interrupt (->
|
||||
* host signals that a page of a process has been paged in and the process can
|
||||
* continue to run). This interrupt can arrive on any cpu and, since we have
|
||||
* virtual cpus, actually appear before the interrupt that signals that a page
|
||||
* is missing.
|
||||
*/
|
||||
static void pfault_interrupt(struct ext_code ext_code,
|
||||
unsigned int param32, unsigned long param64)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
__u16 subcode;
|
||||
pid_t pid;
|
||||
|
||||
/*
|
||||
* Get the external interruption subcode & pfault initial/completion
|
||||
* signal bit. VM stores this in the 'cpu address' field associated
|
||||
* with the external interrupt.
|
||||
*/
|
||||
subcode = ext_code.subcode;
|
||||
if ((subcode & 0xff00) != __SUBCODE_MASK)
|
||||
return;
|
||||
inc_irq_stat(IRQEXT_PFL);
|
||||
/* Get the token (= pid of the affected task). */
|
||||
pid = param64 & LPP_PID_MASK;
|
||||
rcu_read_lock();
|
||||
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
|
||||
if (tsk)
|
||||
get_task_struct(tsk);
|
||||
rcu_read_unlock();
|
||||
if (!tsk)
|
||||
return;
|
||||
spin_lock(&pfault_lock);
|
||||
if (subcode & PF_COMPLETE) {
|
||||
/* signal bit is set -> a page has been swapped in by VM */
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/* Initial interrupt was faster than the completion
|
||||
* interrupt. pfault_wait is valid. Set pfault_wait
|
||||
* back to zero and wake up the process. This can
|
||||
* safely be done because the task is still sleeping
|
||||
* and can't produce new pfaults. */
|
||||
tsk->thread.pfault_wait = 0;
|
||||
list_del(&tsk->thread.list);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
} else {
|
||||
/* Completion interrupt was faster than initial
|
||||
* interrupt. Set pfault_wait to -1 so the initial
|
||||
* interrupt doesn't put the task to sleep.
|
||||
* If the task is not running, ignore the completion
|
||||
* interrupt since it must be a leftover of a PFAULT
|
||||
* CANCEL operation which didn't remove all pending
|
||||
* completion interrupts. */
|
||||
if (task_is_running(tsk))
|
||||
tsk->thread.pfault_wait = -1;
|
||||
}
|
||||
} else {
|
||||
/* signal bit not set -> a real page is missing. */
|
||||
if (WARN_ON_ONCE(tsk != current))
|
||||
goto out;
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/* Already on the list with a reference: put to sleep */
|
||||
goto block;
|
||||
} else if (tsk->thread.pfault_wait == -1) {
|
||||
/* Completion interrupt was faster than the initial
|
||||
* interrupt (pfault_wait == -1). Set pfault_wait
|
||||
* back to zero and exit. */
|
||||
tsk->thread.pfault_wait = 0;
|
||||
} else {
|
||||
/* Initial interrupt arrived before completion
|
||||
* interrupt. Let the task sleep.
|
||||
* An extra task reference is needed since a different
|
||||
* cpu may set the task state to TASK_RUNNING again
|
||||
* before the scheduler is reached. */
|
||||
get_task_struct(tsk);
|
||||
tsk->thread.pfault_wait = 1;
|
||||
list_add(&tsk->thread.list, &pfault_list);
|
||||
block:
|
||||
/* Since this must be a userspace fault, there
|
||||
* is no kernel task state to trample. Rely on the
|
||||
* return to userspace schedule() to block. */
|
||||
__set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
set_tsk_need_resched(tsk);
|
||||
set_preempt_need_resched();
|
||||
}
|
||||
}
|
||||
out:
|
||||
spin_unlock(&pfault_lock);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
|
||||
static int pfault_cpu_dead(unsigned int cpu)
|
||||
{
|
||||
struct thread_struct *thread, *next;
|
||||
struct task_struct *tsk;
|
||||
|
||||
spin_lock_irq(&pfault_lock);
|
||||
list_for_each_entry_safe(thread, next, &pfault_list, list) {
|
||||
thread->pfault_wait = 0;
|
||||
list_del(&thread->list);
|
||||
tsk = container_of(thread, struct task_struct, thread);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
spin_unlock_irq(&pfault_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init pfault_irq_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
if (rc)
|
||||
goto out_extint;
|
||||
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
|
||||
if (rc)
|
||||
goto out_pfault;
|
||||
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
|
||||
cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
|
||||
NULL, pfault_cpu_dead);
|
||||
return 0;
|
||||
|
||||
out_pfault:
|
||||
unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
out_extint:
|
||||
pfault_disable = 1;
|
||||
return rc;
|
||||
}
|
||||
early_initcall(pfault_irq_init);
|
||||
|
||||
#endif /* CONFIG_PFAULT */
|
||||
|
||||
#if IS_ENABLED(CONFIG_PGSTE)
|
||||
|
||||
void do_secure_storage_access(struct pt_regs *regs)
|
||||
|
@@ -86,11 +86,12 @@ size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
	void *chunk;
	pte_t pte;

	BUILD_BUG_ON(MEMCPY_REAL_SIZE != PAGE_SIZE);
	while (count) {
		phys = src & PAGE_MASK;
		offset = src & ~PAGE_MASK;
		phys = src & MEMCPY_REAL_MASK;
		offset = src & ~MEMCPY_REAL_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, PAGE_SIZE - offset);
		len = min(count, MEMCPY_REAL_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
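Editorial note: a small sketch of the chunking arithmetic the memcpy_real_iter() loop above relies on. The constants below are illustrative stand-ins; the kernel derives MEMCPY_REAL_SIZE and MEMCPY_REAL_MASK from its memcpy-real area layout.

#include <stdio.h>

#define MEMCPY_REAL_SIZE 0x1000UL
#define MEMCPY_REAL_MASK (~(MEMCPY_REAL_SIZE - 1))

int main(void)
{
	unsigned long src = 0x12345678UL;   /* arbitrary physical source address */
	unsigned long count = 0x3000UL;     /* bytes left to copy */

	while (count) {
		unsigned long phys = src & MEMCPY_REAL_MASK;      /* aligned window base */
		unsigned long offset = src & ~MEMCPY_REAL_MASK;   /* offset inside window */
		unsigned long len = count < MEMCPY_REAL_SIZE - offset ?
				    count : MEMCPY_REAL_SIZE - offset;

		printf("map phys=%#lx offset=%#lx len=%#lx\n", phys, offset, len);
		src += len;
		count -= len;
	}
	return 0;
}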
arch/s390/mm/pfault.c (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright IBM Corp. 1999, 2023
|
||||
*/
|
||||
|
||||
#include <linux/cpuhotplug.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/irq.h>
|
||||
#include <asm/asm-extable.h>
|
||||
#include <asm/pfault.h>
|
||||
#include <asm/diag.h>
|
||||
|
||||
#define __SUBCODE_MASK 0x0600
|
||||
#define __PF_RES_FIELD 0x8000000000000000UL
|
||||
|
||||
/*
|
||||
* 'pfault' pseudo page faults routines.
|
||||
*/
|
||||
static int pfault_disable;
|
||||
|
||||
static int __init nopfault(char *str)
|
||||
{
|
||||
pfault_disable = 1;
|
||||
return 1;
|
||||
}
|
||||
early_param("nopfault", nopfault);
|
||||
|
||||
struct pfault_refbk {
|
||||
u16 refdiagc;
|
||||
u16 reffcode;
|
||||
u16 refdwlen;
|
||||
u16 refversn;
|
||||
u64 refgaddr;
|
||||
u64 refselmk;
|
||||
u64 refcmpmk;
|
||||
u64 reserved;
|
||||
};
|
||||
|
||||
static struct pfault_refbk pfault_init_refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 0,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
.refgaddr = __LC_LPP,
|
||||
.refselmk = 1UL << 48,
|
||||
.refcmpmk = 1UL << 48,
|
||||
.reserved = __PF_RES_FIELD
|
||||
};
|
||||
|
||||
int __pfault_init(void)
|
||||
{
|
||||
int rc = -EOPNOTSUPP;
|
||||
|
||||
if (pfault_disable)
|
||||
return rc;
|
||||
diag_stat_inc(DIAG_STAT_X258);
|
||||
asm volatile(
|
||||
" diag %[refbk],%[rc],0x258\n"
|
||||
"0: nopr %%r7\n"
|
||||
EX_TABLE(0b, 0b)
|
||||
: [rc] "+d" (rc)
|
||||
: [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
|
||||
: "cc");
|
||||
return rc;
|
||||
}
|
||||
|
||||
static struct pfault_refbk pfault_fini_refbk = {
|
||||
.refdiagc = 0x258,
|
||||
.reffcode = 1,
|
||||
.refdwlen = 5,
|
||||
.refversn = 2,
|
||||
};
|
||||
|
||||
void __pfault_fini(void)
|
||||
{
|
||||
if (pfault_disable)
|
||||
return;
|
||||
diag_stat_inc(DIAG_STAT_X258);
|
||||
asm volatile(
|
||||
" diag %[refbk],0,0x258\n"
|
||||
"0: nopr %%r7\n"
|
||||
EX_TABLE(0b, 0b)
|
||||
:
|
||||
: [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(pfault_lock);
|
||||
static LIST_HEAD(pfault_list);
|
||||
|
||||
#define PF_COMPLETE 0x0080
|
||||
|
||||
/*
|
||||
* The mechanism of our pfault code: if Linux is running as guest, runs a user
|
||||
* space process and the user space process accesses a page that the host has
|
||||
* paged out we get a pfault interrupt.
|
||||
*
|
||||
* This allows us, within the guest, to schedule a different process. Without
|
||||
* this mechanism the host would have to suspend the whole virtual cpu until
|
||||
* the page has been paged in.
|
||||
*
|
||||
* So when we get such an interrupt then we set the state of the current task
|
||||
* to uninterruptible and also set the need_resched flag. Both happens within
|
||||
* interrupt context(!). If we later on want to return to user space we
|
||||
* recognize the need_resched flag and then call schedule(). It's not very
|
||||
* obvious how this works...
|
||||
*
|
||||
* Of course we have a lot of additional fun with the completion interrupt (->
|
||||
* host signals that a page of a process has been paged in and the process can
|
||||
* continue to run). This interrupt can arrive on any cpu and, since we have
|
||||
* virtual cpus, actually appear before the interrupt that signals that a page
|
||||
* is missing.
|
||||
*/
|
||||
static void pfault_interrupt(struct ext_code ext_code,
|
||||
unsigned int param32, unsigned long param64)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
__u16 subcode;
|
||||
pid_t pid;
|
||||
|
||||
/*
|
||||
* Get the external interruption subcode & pfault initial/completion
|
||||
* signal bit. VM stores this in the 'cpu address' field associated
|
||||
* with the external interrupt.
|
||||
*/
|
||||
subcode = ext_code.subcode;
|
||||
if ((subcode & 0xff00) != __SUBCODE_MASK)
|
||||
return;
|
||||
inc_irq_stat(IRQEXT_PFL);
|
||||
/* Get the token (= pid of the affected task). */
|
||||
pid = param64 & LPP_PID_MASK;
|
||||
rcu_read_lock();
|
||||
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
|
||||
if (tsk)
|
||||
get_task_struct(tsk);
|
||||
rcu_read_unlock();
|
||||
if (!tsk)
|
||||
return;
|
||||
spin_lock(&pfault_lock);
|
||||
if (subcode & PF_COMPLETE) {
|
||||
/* signal bit is set -> a page has been swapped in by VM */
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/*
|
||||
* Initial interrupt was faster than the completion
|
||||
* interrupt. pfault_wait is valid. Set pfault_wait
|
||||
* back to zero and wake up the process. This can
|
||||
* safely be done because the task is still sleeping
|
||||
* and can't produce new pfaults.
|
||||
*/
|
||||
tsk->thread.pfault_wait = 0;
|
||||
list_del(&tsk->thread.list);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
} else {
|
||||
/*
|
||||
* Completion interrupt was faster than initial
|
||||
* interrupt. Set pfault_wait to -1 so the initial
|
||||
* interrupt doesn't put the task to sleep.
|
||||
* If the task is not running, ignore the completion
|
||||
* interrupt since it must be a leftover of a PFAULT
|
||||
* CANCEL operation which didn't remove all pending
|
||||
* completion interrupts.
|
||||
*/
|
||||
if (task_is_running(tsk))
|
||||
tsk->thread.pfault_wait = -1;
|
||||
}
|
||||
} else {
|
||||
/* signal bit not set -> a real page is missing. */
|
||||
if (WARN_ON_ONCE(tsk != current))
|
||||
goto out;
|
||||
if (tsk->thread.pfault_wait == 1) {
|
||||
/* Already on the list with a reference: put to sleep */
|
||||
goto block;
|
||||
} else if (tsk->thread.pfault_wait == -1) {
|
||||
/*
|
||||
* Completion interrupt was faster than the initial
|
||||
* interrupt (pfault_wait == -1). Set pfault_wait
|
||||
* back to zero and exit.
|
||||
*/
|
||||
tsk->thread.pfault_wait = 0;
|
||||
} else {
|
||||
/*
|
||||
* Initial interrupt arrived before completion
|
||||
* interrupt. Let the task sleep.
|
||||
* An extra task reference is needed since a different
|
||||
* cpu may set the task state to TASK_RUNNING again
|
||||
* before the scheduler is reached.
|
||||
*/
|
||||
get_task_struct(tsk);
|
||||
tsk->thread.pfault_wait = 1;
|
||||
list_add(&tsk->thread.list, &pfault_list);
|
||||
block:
|
||||
/*
|
||||
* Since this must be a userspace fault, there
|
||||
* is no kernel task state to trample. Rely on the
|
||||
* return to userspace schedule() to block.
|
||||
*/
|
||||
__set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
set_tsk_need_resched(tsk);
|
||||
set_preempt_need_resched();
|
||||
}
|
||||
}
|
||||
out:
|
||||
spin_unlock(&pfault_lock);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
|
||||
static int pfault_cpu_dead(unsigned int cpu)
|
||||
{
|
||||
struct thread_struct *thread, *next;
|
||||
struct task_struct *tsk;
|
||||
|
||||
spin_lock_irq(&pfault_lock);
|
||||
list_for_each_entry_safe(thread, next, &pfault_list, list) {
|
||||
thread->pfault_wait = 0;
|
||||
list_del(&thread->list);
|
||||
tsk = container_of(thread, struct task_struct, thread);
|
||||
wake_up_process(tsk);
|
||||
put_task_struct(tsk);
|
||||
}
|
||||
spin_unlock_irq(&pfault_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init pfault_irq_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
if (rc)
|
||||
goto out_extint;
|
||||
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
|
||||
if (rc)
|
||||
goto out_pfault;
|
||||
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
|
||||
cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
|
||||
NULL, pfault_cpu_dead);
|
||||
return 0;
|
||||
|
||||
out_pfault:
|
||||
unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
|
||||
out_extint:
|
||||
pfault_disable = 1;
|
||||
return rc;
|
||||
}
|
||||
early_initcall(pfault_irq_init);
|
@@ -36,7 +36,7 @@ static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}
@@ -531,7 +531,7 @@ struct range arch_get_mappable_range(void)
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}
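Editorial note: a hedged sketch of how the new arch_get_mappable_range() return value lines up with the -ERANGE message in segment_warning() earlier in this series. The max_mappable value below is made up; the kernel records the real one at boot.

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;
};

static unsigned long long max_mappable = 1ULL << 40;  /* illustrative only */

static struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;   /* last mappable byte, inclusive */
	return mhp_range;
}

int main(void)
{
	struct range r = arch_get_mappable_range();

	/* Mirrors what the -ERANGE branch prints: the first address beyond
	 * the mappable range, i.e. end + 1. */
	printf("kernel mapping range limit: %llu\n", r.end + 1);
	return 0;
}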
@@ -666,9 +666,4 @@ static struct miscdevice clp_misc_device = {
	.fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);
builtin_misc_device(clp_misc_device);
@@ -195,3 +195,5 @@ obj-$(CONFIG_PECI) += peci/
obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/

obj-$(CONFIG_S390) += s390/
@@ -70,10 +70,9 @@ config ZCRYPT
	select HW_RANDOM
	help
	  Select this option if you want to enable support for
	  s390 cryptographic adapters like:
	  + Crypto Express 2 up to 7 Coprocessor (CEXxC)
	  + Crypto Express 2 up to 7 Accelerator (CEXxA)
	  + Crypto Express 4 up to 7 EP11 Coprocessor (CEXxP)
	  s390 cryptographic adapters like Crypto Express 4 up
	  to 8 in Coprocessor (CEXxC), EP11 Coprocessor (CEXxP)
	  or Accelerator (CEXxA) mode.

config ZCRYPT_DEBUG
	bool "Enable debug features for s390 cryptographic adapters"
@ -412,6 +412,7 @@ removeseg:
|
||||
}
|
||||
list_del(&dev_info->lh);
|
||||
|
||||
dax_remove_host(dev_info->gd);
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
del_gendisk(dev_info->gd);
|
||||
@ -707,9 +708,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
|
||||
goto out;
|
||||
|
||||
out_dax_host:
|
||||
put_device(&dev_info->dev);
|
||||
dax_remove_host(dev_info->gd);
|
||||
out_dax:
|
||||
put_device(&dev_info->dev);
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
put_dev:
|
||||
@ -789,6 +790,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
|
||||
}
|
||||
|
||||
list_del(&dev_info->lh);
|
||||
dax_remove_host(dev_info->gd);
|
||||
kill_dax(dev_info->dax_dev);
|
||||
put_dax(dev_info->dax_dev);
|
||||
del_gendisk(dev_info->gd);
|
||||
@ -860,7 +862,7 @@ dcssblk_submit_bio(struct bio *bio)
|
||||
struct bio_vec bvec;
|
||||
struct bvec_iter iter;
|
||||
unsigned long index;
|
||||
unsigned long page_addr;
|
||||
void *page_addr;
|
||||
unsigned long source_addr;
|
||||
unsigned long bytes_done;
|
||||
|
||||
@ -868,8 +870,8 @@ dcssblk_submit_bio(struct bio *bio)
|
||||
dev_info = bio->bi_bdev->bd_disk->private_data;
|
||||
if (dev_info == NULL)
|
||||
goto fail;
|
||||
if ((bio->bi_iter.bi_sector & 7) != 0 ||
|
||||
(bio->bi_iter.bi_size & 4095) != 0)
|
||||
if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
|
||||
!IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
|
||||
/* Request is not page-aligned. */
|
||||
goto fail;
|
||||
/* verify data transfer direction */
|
||||
@ -889,18 +891,16 @@ dcssblk_submit_bio(struct bio *bio)
|
||||
|
||||
index = (bio->bi_iter.bi_sector >> 3);
|
||||
bio_for_each_segment(bvec, bio, iter) {
|
||||
page_addr = (unsigned long)bvec_virt(&bvec);
|
||||
page_addr = bvec_virt(&bvec);
|
||||
source_addr = dev_info->start + (index<<12) + bytes_done;
|
||||
if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
|
||||
if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
|
||||
!IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
|
||||
// More paranoia.
|
||||
goto fail;
|
||||
if (bio_data_dir(bio) == READ) {
|
||||
memcpy((void*)page_addr, (void*)source_addr,
|
||||
bvec.bv_len);
|
||||
} else {
|
||||
memcpy((void*)source_addr, (void*)page_addr,
|
||||
bvec.bv_len);
|
||||
}
|
||||
if (bio_data_dir(bio) == READ)
|
||||
memcpy(page_addr, __va(source_addr), bvec.bv_len);
|
||||
else
|
||||
memcpy(__va(source_addr), page_addr, bvec.bv_len);
|
||||
bytes_done += bvec.bv_len;
|
||||
}
|
||||
bio_endio(bio);
|
||||
|
@@ -134,7 +134,7 @@ static void scm_request_done(struct scm_request *scmrq)

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
			mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
@@ -392,10 +392,6 @@ static void __init add_memory_merged(u16 rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (start >= ident_map_size)
		goto skip_add;
	if (start + size > ident_map_size)
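Editorial note: the add_memory_merged() hunk above swaps the VMEM_MAX_PHYS limit for ident_map_size. A tiny sketch of the clamp logic with made-up values; nothing here is the kernel's actual data.

#include <stdio.h>

int main(void)
{
	unsigned long long ident_map_size = 1ULL << 30;   /* illustrative limit */
	unsigned long long start = 0x3f000000ULL;
	unsigned long long size  = 0x04000000ULL;

	if (start >= ident_map_size) {
		printf("skip: region starts beyond the identity mapping\n");
		return 0;
	}
	if (start + size > ident_map_size)
		size = ident_map_size - start;   /* clamp to the mapped part */
	printf("add memory: start=%#llx size=%#llx\n", start, size);
	return 0;
}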
@@ -55,6 +55,7 @@ static void __init sclp_early_facilities_detect(void)
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST;
	if (sccb->cpuoff > 134) {
		sclp.has_diag318 = !!(sccb->byte_134 & 0x80);
		sclp.has_diag320 = !!(sccb->byte_134 & 0x04);
		sclp.has_iplcc = !!(sccb->byte_134 & 0x02);
	}
	if (sccb->cpuoff > 137) {
@@ -89,7 +89,7 @@ static void vmcp_response_free(struct vmcp_session *session)
	order = get_order(session->bufsize);
	nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT;
	if (session->cma_alloc) {
		page = virt_to_page((unsigned long)session->response);
		page = virt_to_page(session->response);
		cma_release(vmcp_cma, page, nr_pages);
		session->cma_alloc = 0;
	} else {
@@ -3,7 +3,7 @@
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI/NVMe disks (zfcp/nvme dump).
 *
 * For more information please refer to Documentation/s390/zfcpdump.rst
 * For more information please refer to Documentation/arch/s390/zfcpdump.rst
 *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
@@ -11,7 +11,7 @@ zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o

# pkey kernel module
pkey-objs := pkey_api.o
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Copyright IBM Corp. 2006, 2021
|
||||
* Copyright IBM Corp. 2006, 2023
|
||||
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Ralph Wuerthner <rwuerthn@de.ibm.com>
|
||||
@ -218,6 +218,15 @@ int ap_sb_available(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* ap_is_se_guest(): Check for SE guest with AP pass-through support.
|
||||
*/
|
||||
bool ap_is_se_guest(void)
|
||||
{
|
||||
return is_prot_virt_guest() && ap_sb_available();
|
||||
}
|
||||
EXPORT_SYMBOL(ap_is_se_guest);
|
||||
|
||||
/*
|
||||
* ap_fetch_qci_info(): Fetch cryptographic config info
|
||||
*
|
||||
@ -387,23 +396,6 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
|
||||
*q_ml = tapq_info.ml;
|
||||
*q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
|
||||
*q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
|
||||
switch (*q_type) {
|
||||
/* For CEX2 and CEX3 the available functions
|
||||
* are not reflected by the facilities bits.
|
||||
* Instead it is coded into the type. So here
|
||||
* modify the function bits based on the type.
|
||||
*/
|
||||
case AP_DEVICE_TYPE_CEX2A:
|
||||
case AP_DEVICE_TYPE_CEX3A:
|
||||
*q_fac |= 0x08000000;
|
||||
break;
|
||||
case AP_DEVICE_TYPE_CEX2C:
|
||||
case AP_DEVICE_TYPE_CEX3C:
|
||||
*q_fac |= 0x10000000;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return 1;
|
||||
default:
|
||||
/*
|
||||
@ -1678,8 +1670,8 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
|
||||
{
|
||||
int comp_type = 0;
|
||||
|
||||
/* < CEX2A is not supported */
|
||||
if (rawtype < AP_DEVICE_TYPE_CEX2A) {
|
||||
/* < CEX4 is not supported */
|
||||
if (rawtype < AP_DEVICE_TYPE_CEX4) {
|
||||
AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
|
||||
__func__, AP_QID_CARD(qid),
|
||||
AP_QID_QUEUE(qid), rawtype);
|
||||
@ -1701,7 +1693,7 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
|
||||
apinfo.cat = AP_DEVICE_TYPE_CEX8;
|
||||
status = ap_qact(qid, 0, &apinfo);
|
||||
if (status.response_code == AP_RESPONSE_NORMAL &&
|
||||
apinfo.cat >= AP_DEVICE_TYPE_CEX2A &&
|
||||
apinfo.cat >= AP_DEVICE_TYPE_CEX4 &&
|
||||
apinfo.cat <= AP_DEVICE_TYPE_CEX8)
|
||||
comp_type = apinfo.cat;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
/*
|
||||
* Copyright IBM Corp. 2006, 2019
|
||||
* Copyright IBM Corp. 2006, 2023
|
||||
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Ralph Wuerthner <rwuerthn@de.ibm.com>
|
||||
@ -67,15 +67,8 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
|
||||
#define AP_RESPONSE_INVALID_DOMAIN 0x42
|
||||
|
||||
/*
|
||||
* Known device types
|
||||
* Supported AP device types
|
||||
*/
|
||||
#define AP_DEVICE_TYPE_PCICC 3
|
||||
#define AP_DEVICE_TYPE_PCICA 4
|
||||
#define AP_DEVICE_TYPE_PCIXCC 5
|
||||
#define AP_DEVICE_TYPE_CEX2A 6
|
||||
#define AP_DEVICE_TYPE_CEX2C 7
|
||||
#define AP_DEVICE_TYPE_CEX3A 8
|
||||
#define AP_DEVICE_TYPE_CEX3C 9
|
||||
#define AP_DEVICE_TYPE_CEX4 10
|
||||
#define AP_DEVICE_TYPE_CEX5 11
|
||||
#define AP_DEVICE_TYPE_CEX6 12
|
||||
@ -272,14 +265,6 @@ static inline void ap_release_message(struct ap_message *ap_msg)
|
||||
kfree_sensitive(ap_msg->private);
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: don't use ap_send/ap_recv after using ap_queue_message
|
||||
* for the first time. Otherwise the ap message queue will get
|
||||
* confused.
|
||||
*/
|
||||
int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen);
|
||||
int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen);
|
||||
|
||||
enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
|
||||
enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
|
||||
|
||||
@ -289,6 +274,7 @@ void ap_flush_queue(struct ap_queue *aq);
|
||||
|
||||
void *ap_airq_ptr(void);
|
||||
int ap_sb_available(void);
|
||||
bool ap_is_se_guest(void);
|
||||
void ap_wait(enum ap_sm_wait wait);
|
||||
void ap_request_timeout(struct timer_list *t);
|
||||
void ap_bus_force_rescan(void);
|
||||
|
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright IBM Corp. 2016
|
||||
* Copyright IBM Corp. 2016, 2023
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*
|
||||
* Adjunct processor bus, queue related code.
|
||||
@ -93,51 +93,6 @@ __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
|
||||
return ap_nqap(qid, psmid, msg, msglen);
|
||||
}
|
||||
|
||||
int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
|
||||
{
|
||||
struct ap_queue_status status;
|
||||
|
||||
status = __ap_send(qid, psmid, msg, msglen, 0);
|
||||
if (status.async)
|
||||
return -EPERM;
|
||||
switch (status.response_code) {
|
||||
case AP_RESPONSE_NORMAL:
|
||||
return 0;
|
||||
case AP_RESPONSE_Q_FULL:
|
||||
case AP_RESPONSE_RESET_IN_PROGRESS:
|
||||
return -EBUSY;
|
||||
case AP_RESPONSE_REQ_FAC_NOT_INST:
|
||||
return -EINVAL;
|
||||
default: /* Device is gone. */
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ap_send);
|
||||
|
||||
int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
|
||||
{
|
||||
struct ap_queue_status status;
|
||||
|
||||
if (!msg)
|
||||
return -EINVAL;
|
||||
status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
|
||||
if (status.async)
|
||||
return -EPERM;
|
||||
switch (status.response_code) {
|
||||
case AP_RESPONSE_NORMAL:
|
||||
return 0;
|
||||
case AP_RESPONSE_NO_PENDING_REPLY:
|
||||
if (status.queue_empty)
|
||||
return -ENOENT;
|
||||
return -EBUSY;
|
||||
case AP_RESPONSE_RESET_IN_PROGRESS:
|
||||
return -EBUSY;
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ap_recv);
|
||||
|
||||
/* State machine definitions and helpers */
|
||||
|
||||
static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
|
||||
|
@ -263,7 +263,9 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
|
||||
|
||||
/* build a list of apqns suitable for ep11 keys with cpacf support */
|
||||
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
|
||||
ZCRYPT_CEX7, EP11_API_V, NULL);
|
||||
ZCRYPT_CEX7,
|
||||
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
|
||||
NULL);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -272,7 +274,8 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
|
||||
card = apqns[i] >> 16;
|
||||
dom = apqns[i] & 0xFFFF;
|
||||
rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
|
||||
0, clrkey, keybuf, keybuflen);
|
||||
0, clrkey, keybuf, keybuflen,
|
||||
PKEY_TYPE_EP11);
|
||||
if (rc == 0)
|
||||
break;
|
||||
}
|
||||
@ -287,10 +290,9 @@ out:
|
||||
/*
|
||||
* Find card and transform EP11 secure key into protected key.
|
||||
*/
|
||||
static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
|
||||
u32 *protkeylen, u32 *protkeytype)
|
||||
static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
|
||||
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
|
||||
{
|
||||
struct ep11keyblob *kb = (struct ep11keyblob *)key;
|
||||
u32 nr_apqns, *apqns = NULL;
|
||||
u16 card, dom;
|
||||
int i, rc;
|
||||
@ -299,7 +301,9 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
|
||||
|
||||
/* build a list of apqns suitable for this key */
|
||||
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
|
||||
ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
|
||||
ZCRYPT_CEX7,
|
||||
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
|
||||
ep11_kb_wkvp(key, keylen));
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -307,7 +311,7 @@ static int pkey_ep11key2pkey(const u8 *key, u8 *protkey,
|
||||
for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
|
||||
card = apqns[i] >> 16;
|
||||
dom = apqns[i] & 0xFFFF;
|
||||
rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
|
||||
rc = ep11_kblob2protkey(card, dom, key, keylen,
|
||||
protkey, protkeylen, protkeytype);
|
||||
if (rc == 0)
|
||||
break;
|
||||
@ -495,7 +499,7 @@ try_via_ep11:
|
||||
tmpbuf, &tmpbuflen);
|
||||
if (rc)
|
||||
goto failure;
|
||||
rc = pkey_ep11key2pkey(tmpbuf,
|
||||
rc = pkey_ep11key2pkey(tmpbuf, tmpbuflen,
|
||||
protkey, protkeylen, protkeytype);
|
||||
if (!rc)
|
||||
goto out;
|
||||
@ -611,7 +615,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
|
||||
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
|
||||
if (rc)
|
||||
goto out;
|
||||
rc = pkey_ep11key2pkey(key,
|
||||
rc = pkey_ep11key2pkey(key, keylen,
|
||||
protkey, protkeylen, protkeytype);
|
||||
break;
|
||||
}
|
||||
@ -620,7 +624,7 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
|
||||
rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
|
||||
if (rc)
|
||||
goto out;
|
||||
rc = pkey_ep11key2pkey(key + sizeof(struct ep11kblob_header),
|
||||
rc = pkey_ep11key2pkey(key, keylen,
|
||||
protkey, protkeylen, protkeytype);
|
||||
break;
|
||||
default:
|
||||
@ -713,6 +717,11 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case PKEY_TYPE_EP11_AES:
|
||||
if (*keybufsize < (sizeof(struct ep11kblob_header) +
|
||||
MINEP11AESKEYBLOBSIZE))
|
||||
return -EINVAL;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -729,9 +738,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
|
||||
card = apqns[i].card;
|
||||
dom = apqns[i].domain;
|
||||
if (ktype == PKEY_TYPE_EP11) {
|
||||
if (ktype == PKEY_TYPE_EP11 ||
|
||||
ktype == PKEY_TYPE_EP11_AES) {
|
||||
rc = ep11_genaeskey(card, dom, ksize, kflags,
|
||||
keybuf, keybufsize);
|
||||
keybuf, keybufsize, ktype);
|
||||
} else if (ktype == PKEY_TYPE_CCA_DATA) {
|
||||
rc = cca_genseckey(card, dom, ksize, keybuf);
|
||||
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
|
||||
@ -769,6 +779,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
if (*keybufsize < MINEP11AESKEYBLOBSIZE)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case PKEY_TYPE_EP11_AES:
|
||||
if (*keybufsize < (sizeof(struct ep11kblob_header) +
|
||||
MINEP11AESKEYBLOBSIZE))
|
||||
return -EINVAL;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -787,9 +802,11 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
|
||||
card = apqns[i].card;
|
||||
dom = apqns[i].domain;
|
||||
if (ktype == PKEY_TYPE_EP11) {
|
||||
if (ktype == PKEY_TYPE_EP11 ||
|
||||
ktype == PKEY_TYPE_EP11_AES) {
|
||||
rc = ep11_clr2keyblob(card, dom, ksize, kflags,
|
||||
clrkey, keybuf, keybufsize);
|
||||
clrkey, keybuf, keybufsize,
|
||||
ktype);
|
||||
} else if (ktype == PKEY_TYPE_CCA_DATA) {
|
||||
rc = cca_clr2seckey(card, dom, ksize,
|
||||
clrkey, keybuf);
|
||||
@ -888,6 +905,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
|
||||
} else if (hdr->type == TOKTYPE_NON_CCA &&
|
||||
hdr->version == TOKVER_EP11_AES) {
|
||||
struct ep11keyblob *kb = (struct ep11keyblob *)key;
|
||||
int api;
|
||||
|
||||
rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
|
||||
if (rc)
|
||||
@ -895,10 +913,12 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
|
||||
if (ktype)
|
||||
*ktype = PKEY_TYPE_EP11;
|
||||
if (ksize)
|
||||
*ksize = kb->head.keybitlen;
|
||||
*ksize = kb->head.bitlen;
|
||||
|
||||
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
|
||||
rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
|
||||
ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
|
||||
ZCRYPT_CEX7, api,
|
||||
ep11_kb_wkvp(key, keylen));
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -908,6 +928,32 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
|
||||
*cardnr = ((struct pkey_apqn *)_apqns)->card;
|
||||
*domain = ((struct pkey_apqn *)_apqns)->domain;
|
||||
|
||||
} else if (hdr->type == TOKTYPE_NON_CCA &&
|
||||
hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
|
||||
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
|
||||
int api;
|
||||
|
||||
rc = ep11_check_aes_key_with_hdr(debug_info, 3,
|
||||
key, keylen, 1);
|
||||
if (rc)
|
||||
goto out;
|
||||
if (ktype)
|
||||
*ktype = PKEY_TYPE_EP11_AES;
|
||||
if (ksize)
|
||||
*ksize = kh->bitlen;
|
||||
|
||||
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
|
||||
rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
|
||||
ZCRYPT_CEX7, api,
|
||||
ep11_kb_wkvp(key, keylen));
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
if (flags)
|
||||
*flags = PKEY_FLAGS_MATCH_CUR_MKVP;
|
||||
|
||||
*cardnr = ((struct pkey_apqn *)_apqns)->card;
|
||||
*domain = ((struct pkey_apqn *)_apqns)->domain;
|
||||
} else {
|
||||
rc = -EINVAL;
|
||||
}
|
||||
@ -949,10 +995,12 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
}
|
||||
} else if (hdr->type == TOKTYPE_NON_CCA) {
|
||||
if (hdr->version == TOKVER_EP11_AES) {
|
||||
if (keylen < sizeof(struct ep11keyblob))
|
||||
return -EINVAL;
|
||||
if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
|
||||
return -EINVAL;
|
||||
} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
|
||||
if (ep11_check_aes_key_with_hdr(debug_info, 3,
|
||||
key, keylen, 1))
|
||||
return -EINVAL;
|
||||
} else {
|
||||
return pkey_nonccatok2pkey(key, keylen,
|
||||
protkey, protkeylen,
|
||||
@ -980,10 +1028,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
protkey, protkeylen,
|
||||
protkeytype);
|
||||
} else {
|
||||
/* EP11 AES secure key blob */
|
||||
struct ep11keyblob *kb = (struct ep11keyblob *)key;
|
||||
|
||||
rc = ep11_kblob2protkey(card, dom, key, kb->head.len,
|
||||
rc = ep11_kblob2protkey(card, dom, key, keylen,
|
||||
protkey, protkeylen,
|
||||
protkeytype);
|
||||
}
|
||||
@ -1018,7 +1063,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
|
||||
return -EINVAL;
|
||||
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
|
||||
minhwtype = ZCRYPT_CEX7;
|
||||
api = EP11_API_V;
|
||||
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
|
||||
}
|
||||
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
|
||||
minhwtype, api, kb->wkvp);
|
||||
@ -1034,7 +1079,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
|
||||
return -EINVAL;
|
||||
if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
|
||||
minhwtype = ZCRYPT_CEX7;
|
||||
api = EP11_API_V;
|
||||
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
|
||||
}
|
||||
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
|
||||
minhwtype, api, kb->wkvp);
|
||||
@ -1144,11 +1189,13 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
|
||||
ktype == PKEY_TYPE_EP11_AES ||
|
||||
ktype == PKEY_TYPE_EP11_ECC) {
|
||||
u8 *wkvp = NULL;
|
||||
int api;
|
||||
|
||||
if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
|
||||
wkvp = cur_mkvp;
|
||||
api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
|
||||
rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
|
||||
ZCRYPT_CEX7, EP11_API_V, wkvp);
|
||||
ZCRYPT_CEX7, api, wkvp);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -1243,12 +1290,14 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
|
||||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
|
||||
is_ep11_keyblob(key + sizeof(struct ep11kblob_header)))
|
||||
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
|
||||
protkey, protkeylen, protkeytype);
|
||||
protkey, protkeylen,
|
||||
protkeytype);
|
||||
else if (hdr->type == TOKTYPE_NON_CCA &&
|
||||
hdr->version == TOKVER_EP11_AES &&
|
||||
is_ep11_keyblob(key))
|
||||
rc = ep11_kblob2protkey(card, dom, key, hdr->len,
|
||||
protkey, protkeylen, protkeytype);
|
||||
protkey, protkeylen,
|
||||
protkeytype);
|
||||
else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
|
||||
hdr->version == TOKVER_CCA_AES)
|
||||
rc = cca_sec2protkey(card, dom, key, protkey,
|
||||
@ -1466,7 +1515,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
|
||||
if (IS_ERR(apqns))
|
||||
return PTR_ERR(apqns);
|
||||
kkey = kmalloc(klen, GFP_KERNEL);
|
||||
kkey = kzalloc(klen, GFP_KERNEL);
|
||||
if (!kkey) {
|
||||
kfree(apqns);
|
||||
return -ENOMEM;
|
||||
@ -1508,7 +1557,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
|
||||
if (IS_ERR(apqns))
|
||||
return PTR_ERR(apqns);
|
||||
kkey = kmalloc(klen, GFP_KERNEL);
|
||||
kkey = kzalloc(klen, GFP_KERNEL);
|
||||
if (!kkey) {
|
||||
kfree(apqns);
|
||||
return -ENOMEM;
|
||||
@ -2102,7 +2151,7 @@ static struct attribute_group ccacipher_attr_group = {
|
||||
* (i.e. off != 0 or count < key blob size) -EINVAL is returned.
|
||||
* This function and the sysfs attributes using it provide EP11 key blobs
|
||||
* padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
|
||||
* 320 bytes.
|
||||
* 336 bytes.
|
||||
*/
|
||||
static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
|
||||
bool is_xts, char *buf, loff_t off,
|
||||
@ -2120,7 +2169,9 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
|
||||
|
||||
/* build a list of apqns able to generate an cipher key */
|
||||
rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
|
||||
ZCRYPT_CEX7, EP11_API_V, NULL);
|
||||
ZCRYPT_CEX7,
|
||||
ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
|
||||
NULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -2130,7 +2181,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
|
||||
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
|
||||
card = apqns[i] >> 16;
|
||||
dom = apqns[i] & 0xFFFF;
|
||||
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
|
||||
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
|
||||
PKEY_TYPE_EP11_AES);
|
||||
if (rc == 0)
|
||||
break;
|
||||
}
|
||||
@ -2140,7 +2192,8 @@ static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
|
||||
if (is_xts) {
|
||||
keysize = MAXEP11AESKEYBLOBSIZE;
|
||||
buf += MAXEP11AESKEYBLOBSIZE;
|
||||
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
|
||||
rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize,
|
||||
PKEY_TYPE_EP11_AES);
|
||||
if (rc == 0)
|
||||
return 2 * MAXEP11AESKEYBLOBSIZE;
|
||||
}
|
||||
|
@ -30,13 +30,12 @@
|
||||
#define AP_QUEUE_UNASSIGNED "unassigned"
|
||||
#define AP_QUEUE_IN_USE "in use"
|
||||
|
||||
#define MAX_RESET_CHECK_WAIT 200 /* Sleep max 200ms for reset check */
|
||||
#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
|
||||
|
||||
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
|
||||
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
|
||||
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
|
||||
static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
|
||||
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
|
||||
|
||||
/**
|
||||
* get_update_locks_for_kvm: Acquire the locks required to dynamically update a
|
||||
@ -360,6 +359,28 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* The nib has to be located in shared storage since guest and
|
||||
* host access it. vfio_pin_pages() will do a pin shared and
|
||||
* if that fails (possibly because it's not a shared page) it
|
||||
* calls export. We try to do a second pin shared here so that
|
||||
* the UV gives us an error code if we try to pin a non-shared
|
||||
* page.
|
||||
*
|
||||
* If the page is already pinned shared the UV will return a success.
|
||||
*/
|
||||
ret = uv_pin_shared(addr);
|
||||
if (ret) {
|
||||
/* vfio_pin_pages() likely exported the page so let's re-import */
|
||||
gmap_convert_to_secure(gmap, addr);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* vfio_ap_irq_enable - Enable Interruption for a APQN
|
||||
*
|
||||
@ -423,6 +444,14 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
|
||||
h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
|
||||
aqic_gisa.gisc = isc;
|
||||
|
||||
/* NIB in non-shared storage is a rc 6 for PV guests */
|
||||
if (kvm_s390_pv_cpu_is_protected(vcpu) &&
|
||||
ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
|
||||
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
|
||||
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
|
||||
return status;
|
||||
}
|
||||
|
||||
nisc = kvm_s390_gisc_register(kvm, isc);
|
||||
if (nisc < 0) {
|
||||
VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
|
||||
@ -675,7 +704,7 @@ static bool vfio_ap_mdev_filter_matrix(unsigned long *apm, unsigned long *aqm,
|
||||
*/
|
||||
apqn = AP_MKQID(apid, apqi);
|
||||
q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
|
||||
if (!q || q->reset_rc) {
|
||||
if (!q || q->reset_status.response_code) {
|
||||
clear_bit_inv(apid,
|
||||
matrix_mdev->shadow_apcb.apm);
|
||||
break;
|
||||
@@ -1608,19 +1637,21 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
{
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
if (status->queue_empty && !status->irq_enabled)
return 0;
return -EBUSY;
case AP_RESPONSE_DECONFIGURED:
/*
* If the AP queue is deconfigured, any subsequent AP command
* targeting the queue will fail with the same response code. On the
* other hand, when an AP adapter is deconfigured, the associated
* queues are reset, so let's return a value indicating the reset
* for which we're waiting completed successfully.
*/
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
return -EBUSY;
case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
case AP_RESPONSE_ASSOC_FAILED:
/*
* These asynchronous response codes indicate a PQAP(AAPQ)
* instruction to associate a secret with the guest failed. All
* subsequent AP instructions will end with the asynchronous
* response code until the AP queue is reset; so, let's return
* a value indicating a reset needs to be performed again.
*/
return -EAGAIN;
default:
WARN(true,
"failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
@@ -1630,91 +1661,105 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
}
}

static int apq_reset_check(struct vfio_ap_queue *q)
{
int ret;
int iters = MAX_RESET_CHECK_WAIT / AP_RESET_INTERVAL;
struct ap_queue_status status;
#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"

for (; iters > 0; iters--) {
static void apq_reset_check(struct work_struct *reset_work)
{
int ret = -EBUSY, elapsed = 0;
struct ap_queue_status status;
struct vfio_ap_queue *q;

q = container_of(reset_work, struct vfio_ap_queue, reset_work);
memcpy(&status, &q->reset_status, sizeof(status));
while (true) {
msleep(AP_RESET_INTERVAL);
elapsed += AP_RESET_INTERVAL;
status = ap_tapq(q->apqn, NULL);
ret = apq_status_check(q->apqn, &status);
if (ret != -EBUSY)
return ret;
if (ret == -EIO)
return;
if (ret == -EBUSY) {
pr_notice_ratelimited(WAIT_MSG, elapsed,
AP_QID_CARD(q->apqn),
AP_QID_QUEUE(q->apqn),
status.response_code,
status.queue_empty,
status.irq_enabled);
} else {
if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
q->reset_status.response_code == AP_RESPONSE_BUSY ||
q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
ret == -EAGAIN) {
status = ap_zapq(q->apqn, 0);
memcpy(&q->reset_status, &status, sizeof(status));
continue;
}
/*
* When an AP adapter is deconfigured, the
* associated queues are reset, so let's set the
* status response code to 0 so the queue may be
* passed through (i.e., not filtered)
*/
if (status.response_code == AP_RESPONSE_DECONFIGURED)
q->reset_status.response_code = 0;
if (q->saved_isc != VFIO_AP_ISC_INVALID)
vfio_ap_free_aqic_resources(q);
break;
}
}
WARN_ONCE(iters <= 0,
"timeout verifying reset of queue %02x.%04x (%u, %u, %u)",
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.queue_empty, status.irq_enabled, status.response_code);
return ret;
}

static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
struct ap_queue_status status;
int ret;

if (!q)
return 0;
retry_zapq:
return;
status = ap_zapq(q->apqn, 0);
q->reset_rc = status.response_code;
memcpy(&q->reset_status, &status, sizeof(status));
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
/* if the reset has not completed, wait for it to take effect */
if (!status.queue_empty || status.irq_enabled)
ret = apq_reset_check(q);
break;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
/*
* There is a reset issued by another process in progress. Let's wait
* for that to complete. Since we have no idea whether it was a RAPQ or
* ZAPQ, then if it completes successfully, let's issue the ZAPQ.
* Let's verify whether the ZAPQ completed successfully on a work queue.
*/
ret = apq_reset_check(q);
if (ret)
break;
goto retry_zapq;
queue_work(system_long_wq, &q->reset_work);
break;
case AP_RESPONSE_DECONFIGURED:
/*
* When an AP adapter is deconfigured, the associated
* queues are reset, so let's return a value indicating the reset
* completed successfully.
* queues are reset, so let's set the status response code to 0
* so the queue may be passed through (i.e., not filtered).
*/
ret = 0;
q->reset_status.response_code = 0;
vfio_ap_free_aqic_resources(q);
break;
default:
WARN(true,
"PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
status.response_code);
return -EIO;
}

vfio_ap_free_aqic_resources(q);

return ret;
}

static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
{
int ret, loop_cursor, rc = 0;
int ret = 0, loop_cursor;
struct vfio_ap_queue *q;

hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode)
vfio_ap_mdev_reset_queue(q);

hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
ret = vfio_ap_mdev_reset_queue(q);
/*
* Regardless whether a queue turns out to be busy, or
* is not operational, we need to continue resetting
* the remaining queues.
*/
if (ret)
rc = ret;
flush_work(&q->reset_work);

if (q->reset_status.response_code)
ret = -EIO;
}

return rc;
return ret;
}

static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
@@ -2038,6 +2083,8 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)

q->apqn = to_ap_queue(&apdev->device)->qid;
q->saved_isc = VFIO_AP_ISC_INVALID;
memset(&q->reset_status, 0, sizeof(q->reset_status));
INIT_WORK(&q->reset_work, apq_reset_check);
matrix_mdev = get_update_locks_by_apqn(q->apqn);

if (matrix_mdev) {
@@ -2087,6 +2134,7 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
}

vfio_ap_mdev_reset_queue(q);
flush_work(&q->reset_work);
dev_set_drvdata(&apdev->device, NULL);
kfree(q);
release_update_locks_for_mdev(matrix_mdev);
@@ -133,7 +133,8 @@ struct ap_matrix_mdev {
* @apqn: the APQN of the AP queue device
* @saved_isc: the guest ISC registered with the GIB interface
* @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
* @reset_rc: the status response code from the last reset of the queue
* @reset_status: the status from the last reset of the queue
* @reset_work: work to wait for queue reset to complete
*/
struct vfio_ap_queue {
struct ap_matrix_mdev *matrix_mdev;
@@ -142,7 +143,8 @@ struct vfio_ap_queue {
#define VFIO_AP_ISC_INVALID 0xff
unsigned char saved_isc;
struct hlist_node mdev_qnode;
unsigned int reset_rc;
struct ap_queue_status reset_status;
struct work_struct reset_work;
};

int vfio_ap_mdev_register(void);
@@ -1,227 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Copyright IBM Corp. 2001, 2012
|
||||
* Author(s): Robert Burroughs
|
||||
* Eric Rossman (edrossma@us.ibm.com)
|
||||
*
|
||||
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
|
||||
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Ralph Wuerthner <rwuerthn@de.ibm.com>
|
||||
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
|
||||
#include "ap_bus.h"
|
||||
#include "zcrypt_api.h"
|
||||
#include "zcrypt_error.h"
|
||||
#include "zcrypt_cex2a.h"
|
||||
#include "zcrypt_msgtype50.h"
|
||||
|
||||
#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
|
||||
#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
|
||||
#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
|
||||
#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
|
||||
|
||||
#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
|
||||
#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
|
||||
|
||||
#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
|
||||
* (max outputdatalength) +
|
||||
* type80_hdr
|
||||
*/
|
||||
#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
|
||||
|
||||
#define CEX2A_CLEANUP_TIME (15 * HZ)
|
||||
#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
|
||||
|
||||
MODULE_AUTHOR("IBM Corporation");
|
||||
MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \
|
||||
"Copyright IBM Corp. 2001, 2018");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static struct ap_device_id zcrypt_cex2a_card_ids[] = {
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX2A,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX3A,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
|
||||
{ /* end of list */ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
|
||||
|
||||
static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX2A,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX3A,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
|
||||
{ /* end of list */ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
|
||||
|
||||
/*
|
||||
* Probe function for CEX2A card devices. It always accepts the AP device
|
||||
* since the bus_match already checked the card type.
|
||||
* @ap_dev: pointer to the AP device.
|
||||
*/
|
||||
static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
|
||||
{
|
||||
/*
|
||||
* Normalized speed ratings per crypto adapter
|
||||
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
|
||||
*/
|
||||
static const int CEX2A_SPEED_IDX[] = {
|
||||
800, 1000, 2000, 900, 1200, 2400, 0, 0};
|
||||
static const int CEX3A_SPEED_IDX[] = {
|
||||
400, 500, 1000, 450, 550, 1200, 0, 0};
|
||||
|
||||
struct ap_card *ac = to_ap_card(&ap_dev->device);
|
||||
struct zcrypt_card *zc;
|
||||
int rc = 0;
|
||||
|
||||
zc = zcrypt_card_alloc();
|
||||
if (!zc)
|
||||
return -ENOMEM;
|
||||
zc->card = ac;
|
||||
dev_set_drvdata(&ap_dev->device, zc);
|
||||
|
||||
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
|
||||
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
|
||||
zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
|
||||
zc->speed_rating = CEX2A_SPEED_IDX;
|
||||
zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
|
||||
zc->type_string = "CEX2A";
|
||||
zc->user_space_type = ZCRYPT_CEX2A;
|
||||
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
|
||||
zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
|
||||
zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
|
||||
zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
|
||||
if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
|
||||
ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
|
||||
zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
|
||||
zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
|
||||
}
|
||||
zc->speed_rating = CEX3A_SPEED_IDX;
|
||||
zc->type_string = "CEX3A";
|
||||
zc->user_space_type = ZCRYPT_CEX3A;
|
||||
} else {
|
||||
zcrypt_card_free(zc);
|
||||
return -ENODEV;
|
||||
}
|
||||
zc->online = 1;
|
||||
|
||||
rc = zcrypt_card_register(zc);
|
||||
if (rc)
|
||||
zcrypt_card_free(zc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called to remove the CEX2A card driver information
|
||||
* if an AP card device is removed.
|
||||
*/
|
||||
static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
|
||||
{
|
||||
struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
|
||||
|
||||
zcrypt_card_unregister(zc);
|
||||
}
|
||||
|
||||
static struct ap_driver zcrypt_cex2a_card_driver = {
|
||||
.probe = zcrypt_cex2a_card_probe,
|
||||
.remove = zcrypt_cex2a_card_remove,
|
||||
.ids = zcrypt_cex2a_card_ids,
|
||||
.flags = AP_DRIVER_FLAG_DEFAULT,
|
||||
};
|
||||
|
||||
/*
|
||||
* Probe function for CEX2A queue devices. It always accepts the AP device
|
||||
* since the bus_match already checked the queue type.
|
||||
* @ap_dev: pointer to the AP device.
|
||||
*/
|
||||
static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
|
||||
{
|
||||
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
|
||||
struct zcrypt_queue *zq = NULL;
|
||||
int rc;
|
||||
|
||||
switch (ap_dev->device_type) {
|
||||
case AP_DEVICE_TYPE_CEX2A:
|
||||
zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
|
||||
if (!zq)
|
||||
return -ENOMEM;
|
||||
break;
|
||||
case AP_DEVICE_TYPE_CEX3A:
|
||||
zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
|
||||
if (!zq)
|
||||
return -ENOMEM;
|
||||
break;
|
||||
}
|
||||
if (!zq)
|
||||
return -ENODEV;
|
||||
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
|
||||
zq->queue = aq;
|
||||
zq->online = 1;
|
||||
atomic_set(&zq->load, 0);
|
||||
ap_queue_init_state(aq);
|
||||
ap_queue_init_reply(aq, &zq->reply);
|
||||
aq->request_timeout = CEX2A_CLEANUP_TIME;
|
||||
dev_set_drvdata(&ap_dev->device, zq);
|
||||
rc = zcrypt_queue_register(zq);
|
||||
if (rc)
|
||||
zcrypt_queue_free(zq);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called to remove the CEX2A queue driver information
|
||||
* if an AP queue device is removed.
|
||||
*/
|
||||
static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
|
||||
{
|
||||
struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
|
||||
|
||||
zcrypt_queue_unregister(zq);
|
||||
}
|
||||
|
||||
static struct ap_driver zcrypt_cex2a_queue_driver = {
|
||||
.probe = zcrypt_cex2a_queue_probe,
|
||||
.remove = zcrypt_cex2a_queue_remove,
|
||||
.ids = zcrypt_cex2a_queue_ids,
|
||||
.flags = AP_DRIVER_FLAG_DEFAULT,
|
||||
};
|
||||
|
||||
int __init zcrypt_cex2a_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = ap_driver_register(&zcrypt_cex2a_card_driver,
|
||||
THIS_MODULE, "cex2acard");
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
|
||||
THIS_MODULE, "cex2aqueue");
|
||||
if (rc)
|
||||
ap_driver_unregister(&zcrypt_cex2a_card_driver);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void __exit zcrypt_cex2a_exit(void)
|
||||
{
|
||||
ap_driver_unregister(&zcrypt_cex2a_queue_driver);
|
||||
ap_driver_unregister(&zcrypt_cex2a_card_driver);
|
||||
}
|
||||
|
||||
module_init(zcrypt_cex2a_init);
|
||||
module_exit(zcrypt_cex2a_exit);
|
@@ -1,134 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0+ */
|
||||
/*
|
||||
* Copyright IBM Corp. 2001, 2006
|
||||
* Author(s): Robert Burroughs
|
||||
* Eric Rossman (edrossma@us.ibm.com)
|
||||
*
|
||||
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
|
||||
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
*/
|
||||
|
||||
#ifndef _ZCRYPT_CEX2A_H_
|
||||
#define _ZCRYPT_CEX2A_H_
|
||||
|
||||
/**
|
||||
* The type 50 message family is associated with CEXxA cards.
|
||||
*
|
||||
* The four members of the family are described below.
|
||||
*
|
||||
* Note that all unsigned char arrays are right-justified and left-padded
|
||||
* with zeroes.
|
||||
*
|
||||
* Note that all reserved fields must be zeroes.
|
||||
*/
|
||||
struct type50_hdr {
|
||||
unsigned char reserved1;
|
||||
unsigned char msg_type_code; /* 0x50 */
|
||||
unsigned short msg_len;
|
||||
unsigned char reserved2;
|
||||
unsigned char ignored;
|
||||
unsigned short reserved3;
|
||||
} __packed;
|
||||
|
||||
#define TYPE50_TYPE_CODE 0x50
|
||||
|
||||
#define TYPE50_MEB1_FMT 0x0001
|
||||
#define TYPE50_MEB2_FMT 0x0002
|
||||
#define TYPE50_MEB3_FMT 0x0003
|
||||
#define TYPE50_CRB1_FMT 0x0011
|
||||
#define TYPE50_CRB2_FMT 0x0012
|
||||
#define TYPE50_CRB3_FMT 0x0013
|
||||
|
||||
/* Mod-Exp, with a small modulus */
|
||||
struct type50_meb1_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0001 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char exponent[128];
|
||||
unsigned char modulus[128];
|
||||
unsigned char message[128];
|
||||
} __packed;
|
||||
|
||||
/* Mod-Exp, with a large modulus */
|
||||
struct type50_meb2_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0002 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char exponent[256];
|
||||
unsigned char modulus[256];
|
||||
unsigned char message[256];
|
||||
} __packed;
|
||||
|
||||
/* Mod-Exp, with a larger modulus */
|
||||
struct type50_meb3_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0003 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char exponent[512];
|
||||
unsigned char modulus[512];
|
||||
unsigned char message[512];
|
||||
} __packed;
|
||||
|
||||
/* CRT, with a small modulus */
|
||||
struct type50_crb1_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0011 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char p[64];
|
||||
unsigned char q[64];
|
||||
unsigned char dp[64];
|
||||
unsigned char dq[64];
|
||||
unsigned char u[64];
|
||||
unsigned char message[128];
|
||||
} __packed;
|
||||
|
||||
/* CRT, with a large modulus */
|
||||
struct type50_crb2_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0012 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char p[128];
|
||||
unsigned char q[128];
|
||||
unsigned char dp[128];
|
||||
unsigned char dq[128];
|
||||
unsigned char u[128];
|
||||
unsigned char message[256];
|
||||
} __packed;
|
||||
|
||||
/* CRT, with a larger modulus */
|
||||
struct type50_crb3_msg {
|
||||
struct type50_hdr header;
|
||||
unsigned short keyblock_type; /* 0x0013 */
|
||||
unsigned char reserved[6];
|
||||
unsigned char p[256];
|
||||
unsigned char q[256];
|
||||
unsigned char dp[256];
|
||||
unsigned char dq[256];
|
||||
unsigned char u[256];
|
||||
unsigned char message[512];
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* The type 80 response family is associated with a CEXxA cards.
|
||||
*
|
||||
* Note that all unsigned char arrays are right-justified and left-padded
|
||||
* with zeroes.
|
||||
*
|
||||
* Note that all reserved fields must be zeroes.
|
||||
*/
|
||||
|
||||
#define TYPE80_RSP_CODE 0x80
|
||||
|
||||
struct type80_hdr {
|
||||
unsigned char reserved1;
|
||||
unsigned char type; /* 0x80 */
|
||||
unsigned short len;
|
||||
unsigned char code; /* 0x00 */
|
||||
unsigned char reserved2[3];
|
||||
unsigned char reserved3[8];
|
||||
} __packed;
|
||||
|
||||
int zcrypt_cex2a_init(void);
|
||||
void zcrypt_cex2a_exit(void);
|
||||
|
||||
#endif /* _ZCRYPT_CEX2A_H_ */
|
@@ -1,421 +0,0 @@
|
||||
// SPDX-License-Identifier: GPL-2.0+
|
||||
/*
|
||||
* Copyright IBM Corp. 2001, 2018
|
||||
* Author(s): Robert Burroughs
|
||||
* Eric Rossman (edrossma@us.ibm.com)
|
||||
*
|
||||
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
|
||||
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Ralph Wuerthner <rwuerthn@de.ibm.com>
|
||||
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
|
||||
#include "ap_bus.h"
|
||||
#include "zcrypt_api.h"
|
||||
#include "zcrypt_error.h"
|
||||
#include "zcrypt_msgtype6.h"
|
||||
#include "zcrypt_cex2c.h"
|
||||
#include "zcrypt_cca_key.h"
|
||||
#include "zcrypt_ccamisc.h"
|
||||
|
||||
#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
|
||||
#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
|
||||
#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
|
||||
#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
|
||||
#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12 * 1024)
|
||||
#define CEX2C_CLEANUP_TIME (15 * HZ)
|
||||
|
||||
MODULE_AUTHOR("IBM Corporation");
|
||||
MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
|
||||
"Copyright IBM Corp. 2001, 2018");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static struct ap_device_id zcrypt_cex2c_card_ids[] = {
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX2C,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX3C,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
|
||||
{ /* end of list */ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids);
|
||||
|
||||
static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX2C,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
|
||||
{ .dev_type = AP_DEVICE_TYPE_CEX3C,
|
||||
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
|
||||
{ /* end of list */ },
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
|
||||
|
||||
/*
|
||||
* CCA card additional device attributes
|
||||
*/
|
||||
static ssize_t cca_serialnr_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zcrypt_card *zc = dev_get_drvdata(dev);
|
||||
struct cca_info ci;
|
||||
struct ap_card *ac = to_ap_card(dev);
|
||||
|
||||
memset(&ci, 0, sizeof(ci));
|
||||
|
||||
if (ap_domain_index >= 0)
|
||||
cca_get_info(ac->id, ap_domain_index, &ci, zc->online);
|
||||
|
||||
return sysfs_emit(buf, "%s\n", ci.serial);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_cca_serialnr =
|
||||
__ATTR(serialnr, 0444, cca_serialnr_show, NULL);
|
||||
|
||||
static struct attribute *cca_card_attrs[] = {
|
||||
&dev_attr_cca_serialnr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group cca_card_attr_grp = {
|
||||
.attrs = cca_card_attrs,
|
||||
};
|
||||
|
||||
/*
|
||||
* CCA queue additional device attributes
|
||||
*/
|
||||
static ssize_t cca_mkvps_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct zcrypt_queue *zq = dev_get_drvdata(dev);
|
||||
int n = 0;
|
||||
struct cca_info ci;
|
||||
static const char * const cao_state[] = { "invalid", "valid" };
|
||||
static const char * const new_state[] = { "empty", "partial", "full" };
|
||||
|
||||
memset(&ci, 0, sizeof(ci));
|
||||
|
||||
cca_get_info(AP_QID_CARD(zq->queue->qid),
|
||||
AP_QID_QUEUE(zq->queue->qid),
|
||||
&ci, zq->online);
|
||||
|
||||
if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
|
||||
n = sysfs_emit(buf, "AES NEW: %s 0x%016llx\n",
|
||||
new_state[ci.new_aes_mk_state - '1'],
|
||||
ci.new_aes_mkvp);
|
||||
else
|
||||
n = sysfs_emit(buf, "AES NEW: - -\n");
|
||||
|
||||
if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
|
||||
n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
|
||||
cao_state[ci.cur_aes_mk_state - '1'],
|
||||
ci.cur_aes_mkvp);
|
||||
else
|
||||
n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
|
||||
|
||||
if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
|
||||
n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
|
||||
cao_state[ci.old_aes_mk_state - '1'],
|
||||
ci.old_aes_mkvp);
|
||||
else
|
||||
n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
|
||||
|
||||
if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
|
||||
n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
|
||||
new_state[ci.new_apka_mk_state - '1'],
|
||||
ci.new_apka_mkvp);
|
||||
else
|
||||
n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
|
||||
|
||||
if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
|
||||
n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
|
||||
cao_state[ci.cur_apka_mk_state - '1'],
|
||||
ci.cur_apka_mkvp);
|
||||
else
|
||||
n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
|
||||
|
||||
if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
|
||||
n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
|
||||
cao_state[ci.old_apka_mk_state - '1'],
|
||||
ci.old_apka_mkvp);
|
||||
else
|
||||
n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
|
||||
|
||||
return n;
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_cca_mkvps =
|
||||
__ATTR(mkvps, 0444, cca_mkvps_show, NULL);
|
||||
|
||||
static struct attribute *cca_queue_attrs[] = {
|
||||
&dev_attr_cca_mkvps.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static const struct attribute_group cca_queue_attr_grp = {
|
||||
.attrs = cca_queue_attrs,
|
||||
};
|
||||
|
||||
/*
|
||||
* Large random number detection function. Its sends a message to a CEX2C/CEX3C
|
||||
* card to find out if large random numbers are supported.
|
||||
* @ap_dev: pointer to the AP device.
|
||||
*
|
||||
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
|
||||
*/
|
||||
static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
|
||||
{
|
||||
struct ap_message ap_msg;
|
||||
unsigned long psmid;
|
||||
unsigned int domain;
|
||||
struct {
|
||||
struct type86_hdr hdr;
|
||||
struct type86_fmt2_ext fmt2;
|
||||
struct CPRBX cprbx;
|
||||
} __packed *reply;
|
||||
struct {
|
||||
struct type6_hdr hdr;
|
||||
struct CPRBX cprbx;
|
||||
char function_code[2];
|
||||
short int rule_length;
|
||||
char rule[8];
|
||||
short int verb_length;
|
||||
short int key_length;
|
||||
} __packed *msg;
|
||||
int rc, i;
|
||||
|
||||
ap_init_message(&ap_msg);
|
||||
ap_msg.msg = (void *)get_zeroed_page(GFP_KERNEL);
|
||||
if (!ap_msg.msg)
|
||||
return -ENOMEM;
|
||||
ap_msg.bufsize = PAGE_SIZE;
|
||||
|
||||
rng_type6cprb_msgx(&ap_msg, 4, &domain);
|
||||
|
||||
msg = ap_msg.msg;
|
||||
msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
|
||||
|
||||
rc = ap_send(aq->qid, 0x0102030405060708UL, ap_msg.msg, ap_msg.len);
|
||||
if (rc)
|
||||
goto out_free;
|
||||
|
||||
/* Wait for the test message to complete. */
|
||||
for (i = 0; i < 2 * HZ; i++) {
|
||||
msleep(1000 / HZ);
|
||||
rc = ap_recv(aq->qid, &psmid, ap_msg.msg, ap_msg.bufsize);
|
||||
if (rc == 0 && psmid == 0x0102030405060708UL)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i >= 2 * HZ) {
|
||||
/* Got no answer. */
|
||||
rc = -ENODEV;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
reply = ap_msg.msg;
|
||||
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
|
||||
rc = 1;
|
||||
else
|
||||
rc = 0;
|
||||
out_free:
|
||||
free_page((unsigned long)ap_msg.msg);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Probe function for CEX2C/CEX3C card devices. It always accepts the
|
||||
* AP device since the bus_match already checked the hardware type.
|
||||
* @ap_dev: pointer to the AP card device.
|
||||
*/
|
||||
static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
|
||||
{
|
||||
/*
|
||||
* Normalized speed ratings per crypto adapter
|
||||
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
|
||||
*/
|
||||
static const int CEX2C_SPEED_IDX[] = {
|
||||
1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
|
||||
static const int CEX3C_SPEED_IDX[] = {
|
||||
500, 700, 1400, 550, 800, 1500, 80, 10};
|
||||
|
||||
struct ap_card *ac = to_ap_card(&ap_dev->device);
|
||||
struct zcrypt_card *zc;
|
||||
int rc = 0;
|
||||
|
||||
zc = zcrypt_card_alloc();
|
||||
if (!zc)
|
||||
return -ENOMEM;
|
||||
zc->card = ac;
|
||||
dev_set_drvdata(&ap_dev->device, zc);
|
||||
switch (ac->ap_dev.device_type) {
|
||||
case AP_DEVICE_TYPE_CEX2C:
|
||||
zc->user_space_type = ZCRYPT_CEX2C;
|
||||
zc->type_string = "CEX2C";
|
||||
zc->speed_rating = CEX2C_SPEED_IDX;
|
||||
zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
|
||||
zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
|
||||
zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
|
||||
break;
|
||||
case AP_DEVICE_TYPE_CEX3C:
|
||||
zc->user_space_type = ZCRYPT_CEX3C;
|
||||
zc->type_string = "CEX3C";
|
||||
zc->speed_rating = CEX3C_SPEED_IDX;
|
||||
zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
|
||||
zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
|
||||
zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
|
||||
break;
|
||||
default:
|
||||
zcrypt_card_free(zc);
|
||||
return -ENODEV;
|
||||
}
|
||||
zc->online = 1;
|
||||
|
||||
rc = zcrypt_card_register(zc);
|
||||
if (rc) {
|
||||
zcrypt_card_free(zc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
|
||||
rc = sysfs_create_group(&ap_dev->device.kobj,
|
||||
&cca_card_attr_grp);
|
||||
if (rc) {
|
||||
zcrypt_card_unregister(zc);
|
||||
zcrypt_card_free(zc);
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called to remove the CEX2C/CEX3C card driver information
|
||||
* if an AP card device is removed.
|
||||
*/
|
||||
static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
|
||||
{
|
||||
struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
|
||||
struct ap_card *ac = to_ap_card(&ap_dev->device);
|
||||
|
||||
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
|
||||
sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
|
||||
|
||||
zcrypt_card_unregister(zc);
|
||||
}
|
||||
|
||||
static struct ap_driver zcrypt_cex2c_card_driver = {
|
||||
.probe = zcrypt_cex2c_card_probe,
|
||||
.remove = zcrypt_cex2c_card_remove,
|
||||
.ids = zcrypt_cex2c_card_ids,
|
||||
.flags = AP_DRIVER_FLAG_DEFAULT,
|
||||
};
|
||||
|
||||
/*
|
||||
* Probe function for CEX2C/CEX3C queue devices. It always accepts the
|
||||
* AP device since the bus_match already checked the hardware type.
|
||||
* @ap_dev: pointer to the AP card device.
|
||||
*/
|
||||
static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
|
||||
{
|
||||
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
|
||||
struct zcrypt_queue *zq;
|
||||
int rc;
|
||||
|
||||
zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE);
|
||||
if (!zq)
|
||||
return -ENOMEM;
|
||||
zq->queue = aq;
|
||||
zq->online = 1;
|
||||
atomic_set(&zq->load, 0);
|
||||
ap_rapq(aq->qid, 0);
|
||||
rc = zcrypt_cex2c_rng_supported(aq);
|
||||
if (rc < 0) {
|
||||
zcrypt_queue_free(zq);
|
||||
return rc;
|
||||
}
|
||||
if (rc)
|
||||
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
|
||||
MSGTYPE06_VARIANT_DEFAULT);
|
||||
else
|
||||
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
|
||||
MSGTYPE06_VARIANT_NORNG);
|
||||
ap_queue_init_state(aq);
|
||||
ap_queue_init_reply(aq, &zq->reply);
|
||||
aq->request_timeout = CEX2C_CLEANUP_TIME;
|
||||
dev_set_drvdata(&ap_dev->device, zq);
|
||||
rc = zcrypt_queue_register(zq);
|
||||
if (rc) {
|
||||
zcrypt_queue_free(zq);
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
|
||||
rc = sysfs_create_group(&ap_dev->device.kobj,
|
||||
&cca_queue_attr_grp);
|
||||
if (rc) {
|
||||
zcrypt_queue_unregister(zq);
|
||||
zcrypt_queue_free(zq);
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called to remove the CEX2C/CEX3C queue driver information
|
||||
* if an AP queue device is removed.
|
||||
*/
|
||||
static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
|
||||
{
|
||||
struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
|
||||
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
|
||||
|
||||
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
|
||||
sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
|
||||
|
||||
zcrypt_queue_unregister(zq);
|
||||
}
|
||||
|
||||
static struct ap_driver zcrypt_cex2c_queue_driver = {
|
||||
.probe = zcrypt_cex2c_queue_probe,
|
||||
.remove = zcrypt_cex2c_queue_remove,
|
||||
.ids = zcrypt_cex2c_queue_ids,
|
||||
.flags = AP_DRIVER_FLAG_DEFAULT,
|
||||
};
|
||||
|
||||
int __init zcrypt_cex2c_init(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = ap_driver_register(&zcrypt_cex2c_card_driver,
|
||||
THIS_MODULE, "cex2card");
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = ap_driver_register(&zcrypt_cex2c_queue_driver,
|
||||
THIS_MODULE, "cex2cqueue");
|
||||
if (rc)
|
||||
ap_driver_unregister(&zcrypt_cex2c_card_driver);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
void zcrypt_cex2c_exit(void)
|
||||
{
|
||||
ap_driver_unregister(&zcrypt_cex2c_queue_driver);
|
||||
ap_driver_unregister(&zcrypt_cex2c_card_driver);
|
||||
}
|
||||
|
||||
module_init(zcrypt_cex2c_init);
|
||||
module_exit(zcrypt_cex2c_exit);
|
@@ -1,18 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright IBM Corp. 2001, 2018
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/

#ifndef _ZCRYPT_CEX2C_H_
#define _ZCRYPT_CEX2C_H_

int zcrypt_cex2c_init(void);
void zcrypt_cex2c_exit(void);

#endif /* _ZCRYPT_CEX2C_H_ */
@@ -29,6 +29,8 @@
#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)

#define EP11_PINBLOB_V1_BYTES 56

/* default iv used here */
static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
@@ -113,6 +115,109 @@ static void __exit card_cache_free(void)
spin_unlock_bh(&card_list_lock);
}

static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
u8 **kbpl, size_t *kbplsize)
{
struct ep11kblob_header *hdr = NULL;
size_t hdrsize, plsize = 0;
int rc = -EINVAL;
u8 *pl = NULL;

if (kblen < sizeof(struct ep11kblob_header))
goto out;
hdr = (struct ep11kblob_header *)kb;

switch (kbver) {
case TOKVER_EP11_AES:
/* header overlays the payload */
hdrsize = 0;
break;
case TOKVER_EP11_ECC_WITH_HEADER:
case TOKVER_EP11_AES_WITH_HEADER:
/* payload starts after the header */
hdrsize = sizeof(struct ep11kblob_header);
break;
default:
goto out;
}

plsize = kblen - hdrsize;
pl = (u8 *)kb + hdrsize;

if (kbhdr)
*kbhdr = hdr;
if (kbhdrsize)
*kbhdrsize = hdrsize;
if (kbpl)
*kbpl = pl;
if (kbplsize)
*kbplsize = plsize;

rc = 0;
out:
return rc;
}

static int ep11_kb_decode(const u8 *kb, size_t kblen,
struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
struct ep11keyblob **kbpl, size_t *kbplsize)
{
struct ep11kblob_header *tmph, *hdr = NULL;
size_t hdrsize = 0, plsize = 0;
struct ep11keyblob *pl = NULL;
int rc = -EINVAL;
u8 *tmpp;

if (kblen < sizeof(struct ep11kblob_header))
goto out;
tmph = (struct ep11kblob_header *)kb;

if (tmph->type != TOKTYPE_NON_CCA &&
tmph->len > kblen)
goto out;

if (ep11_kb_split(kb, kblen, tmph->version,
&hdr, &hdrsize, &tmpp, &plsize))
goto out;

if (plsize < sizeof(struct ep11keyblob))
goto out;

if (!is_ep11_keyblob(tmpp))
goto out;

pl = (struct ep11keyblob *)tmpp;
plsize = hdr->len - hdrsize;

if (kbhdr)
*kbhdr = hdr;
if (kbhdrsize)
*kbhdrsize = hdrsize;
if (kbpl)
*kbpl = pl;
if (kbplsize)
*kbplsize = plsize;

rc = 0;
out:
return rc;
}

/*
* For valid ep11 keyblobs, returns a reference to the wrappingkey verification
* pattern. Otherwise NULL.
*/
const u8 *ep11_kb_wkvp(const u8 *keyblob, size_t keybloblen)
{
struct ep11keyblob *kb;

if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL))
return NULL;
return kb->wkvp;
}
EXPORT_SYMBOL(ep11_kb_wkvp);

/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
*/
@ -489,7 +594,7 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
|
||||
struct ep11_cprb *req = NULL, *rep = NULL;
|
||||
struct ep11_target_dev target;
|
||||
struct ep11_urb *urb = NULL;
|
||||
int api = 1, rc = -ENOMEM;
|
||||
int api = EP11_API_V1, rc = -ENOMEM;
|
||||
|
||||
/* request cprb and payload */
|
||||
req = alloc_cprb(sizeof(struct ep11_info_req_pl));
|
||||
@ -664,8 +769,9 @@ EXPORT_SYMBOL(ep11_get_domain_info);
|
||||
*/
|
||||
#define KEY_ATTR_DEFAULTS 0x00200c00
|
||||
|
||||
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize)
|
||||
static int _ep11_genaeskey(u16 card, u16 domain,
|
||||
u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize)
|
||||
{
|
||||
struct keygen_req_pl {
|
||||
struct pl_head head;
|
||||
@ -685,8 +791,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
u32 attr_bool_bits;
|
||||
u32 attr_val_len_type;
|
||||
u32 attr_val_len_value;
|
||||
u8 pin_tag;
|
||||
u8 pin_len;
|
||||
/* followed by empty pin tag or empty pinblob tag */
|
||||
} __packed * req_pl;
|
||||
struct keygen_rep_pl {
|
||||
struct pl_head head;
|
||||
@ -699,10 +804,11 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
u8 data[512];
|
||||
} __packed * rep_pl;
|
||||
struct ep11_cprb *req = NULL, *rep = NULL;
|
||||
size_t req_pl_size, pinblob_size = 0;
|
||||
struct ep11_target_dev target;
|
||||
struct ep11_urb *urb = NULL;
|
||||
struct ep11keyblob *kb;
|
||||
int api, rc = -ENOMEM;
|
||||
u8 *p;
|
||||
|
||||
switch (keybitsize) {
|
||||
case 128:
|
||||
@ -718,12 +824,22 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
}
|
||||
|
||||
/* request cprb and payload */
|
||||
req = alloc_cprb(sizeof(struct keygen_req_pl));
|
||||
api = (!keygenflags || keygenflags & 0x00200000) ?
|
||||
EP11_API_V4 : EP11_API_V1;
|
||||
if (ap_is_se_guest()) {
|
||||
/*
|
||||
* genkey within SE environment requires API ordinal 6
|
||||
* with empty pinblob
|
||||
*/
|
||||
api = EP11_API_V6;
|
||||
pinblob_size = EP11_PINBLOB_V1_BYTES;
|
||||
}
|
||||
req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size);
|
||||
req = alloc_cprb(req_pl_size);
|
||||
if (!req)
|
||||
goto out;
|
||||
req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
|
||||
api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
|
||||
prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
|
||||
prep_head(&req_pl->head, req_pl_size, api, 21); /* GenerateKey */
|
||||
req_pl->var_tag = 0x04;
|
||||
req_pl->var_len = sizeof(u32);
|
||||
req_pl->keybytes_tag = 0x04;
|
||||
@ -739,7 +855,10 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
|
||||
req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
|
||||
req_pl->attr_val_len_value = keybitsize / 8;
|
||||
req_pl->pin_tag = 0x04;
|
||||
p = ((u8 *)req_pl) + sizeof(*req_pl);
|
||||
/* pin tag */
|
||||
*p++ = 0x04;
|
||||
*p++ = pinblob_size;
|
||||
|
||||
/* reply cprb and payload */
|
||||
rep = alloc_cprb(sizeof(struct keygen_rep_pl));
|
||||
@ -754,7 +873,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
target.ap_id = card;
|
||||
target.dom_id = domain;
|
||||
prep_urb(urb, &target, 1,
|
||||
req, sizeof(*req) + sizeof(*req_pl),
|
||||
req, sizeof(*req) + req_pl_size,
|
||||
rep, sizeof(*rep) + sizeof(*rep_pl));
|
||||
|
||||
rc = zcrypt_send_ep11_cprb(urb);
|
||||
@ -780,14 +899,9 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* copy key blob and set header values */
|
||||
/* copy key blob */
|
||||
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
|
||||
*keybufsize = rep_pl->data_len;
|
||||
kb = (struct ep11keyblob *)keybuf;
|
||||
kb->head.type = TOKTYPE_NON_CCA;
|
||||
kb->head.len = rep_pl->data_len;
|
||||
kb->head.version = TOKVER_EP11_AES;
|
||||
kb->head.keybitlen = keybitsize;
|
||||
|
||||
out:
|
||||
kfree(req);
|
||||
@ -795,6 +909,43 @@ out:
|
||||
kfree(urb);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize, u32 keybufver)
|
||||
{
|
||||
struct ep11kblob_header *hdr;
|
||||
size_t hdr_size, pl_size;
|
||||
u8 *pl;
|
||||
int rc;
|
||||
|
||||
switch (keybufver) {
|
||||
case TOKVER_EP11_AES:
|
||||
case TOKVER_EP11_AES_WITH_HEADER:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
|
||||
&hdr, &hdr_size, &pl, &pl_size);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
|
||||
pl, &pl_size);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
*keybufsize = hdr_size + pl_size;
|
||||
|
||||
/* update header information */
|
||||
hdr->type = TOKTYPE_NON_CCA;
|
||||
hdr->len = *keybufsize;
|
||||
hdr->version = keybufver;
|
||||
hdr->bitlen = keybitsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ep11_genaeskey);
|
||||
|
||||
static int ep11_cryptsingle(u16 card, u16 domain,
|
||||
@ -830,7 +981,7 @@ static int ep11_cryptsingle(u16 card, u16 domain,
|
||||
struct ep11_target_dev target;
|
||||
struct ep11_urb *urb = NULL;
|
||||
size_t req_pl_size, rep_pl_size;
|
||||
int n, api = 1, rc = -ENOMEM;
|
||||
int n, api = EP11_API_V1, rc = -ENOMEM;
|
||||
u8 *p;
|
||||
|
||||
/* the simple asn1 coding used has length limits */
|
||||
@ -924,12 +1075,12 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
const u8 *kek, size_t keksize,
|
||||
const u8 *enckey, size_t enckeysize,
|
||||
u32 mech, const u8 *iv,
|
||||
u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize)
|
||||
static int _ep11_unwrapkey(u16 card, u16 domain,
|
||||
const u8 *kek, size_t keksize,
|
||||
const u8 *enckey, size_t enckeysize,
|
||||
u32 mech, const u8 *iv,
|
||||
u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize)
|
||||
{
|
||||
struct uw_req_pl {
|
||||
struct pl_head head;
|
||||
@ -949,7 +1100,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
* maybe followed by iv data
|
||||
* followed by kek tag + kek blob
|
||||
* followed by empty mac tag
|
||||
* followed by empty pin tag
|
||||
* followed by empty pin tag or empty pinblob tag
|
||||
* followed by encryted key tag + bytes
|
||||
*/
|
||||
} __packed * req_pl;
|
||||
@ -964,21 +1115,30 @@ static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
u8 data[512];
|
||||
} __packed * rep_pl;
|
||||
struct ep11_cprb *req = NULL, *rep = NULL;
|
||||
size_t req_pl_size, pinblob_size = 0;
|
||||
struct ep11_target_dev target;
|
||||
struct ep11_urb *urb = NULL;
|
||||
struct ep11keyblob *kb;
|
||||
size_t req_pl_size;
|
||||
int api, rc = -ENOMEM;
|
||||
u8 *p;
|
||||
|
||||
/* request cprb and payload */
|
||||
api = (!keygenflags || keygenflags & 0x00200000) ?
|
||||
EP11_API_V4 : EP11_API_V1;
|
||||
if (ap_is_se_guest()) {
|
||||
/*
|
||||
* unwrap within SE environment requires API ordinal 6
|
||||
* with empty pinblob
|
||||
*/
|
||||
api = EP11_API_V6;
|
||||
pinblob_size = EP11_PINBLOB_V1_BYTES;
|
||||
}
|
||||
req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
|
||||
+ ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
|
||||
+ ASN1TAGLEN(keksize) + ASN1TAGLEN(0)
|
||||
+ ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize);
|
||||
req = alloc_cprb(req_pl_size);
|
||||
if (!req)
|
||||
goto out;
|
||||
req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
|
||||
api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
|
||||
prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
|
||||
req_pl->attr_tag = 0x04;
|
||||
req_pl->attr_len = 7 * sizeof(u32);
|
||||
@ -1003,9 +1163,10 @@ static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
/* empty mac key tag */
|
||||
*p++ = 0x04;
|
||||
*p++ = 0;
|
||||
/* empty pin tag */
|
||||
/* pin tag */
|
||||
*p++ = 0x04;
|
||||
*p++ = 0;
|
||||
*p++ = pinblob_size;
|
||||
p += pinblob_size;
|
||||
/* encrypted key value tag and bytes */
|
||||
p += asn1tag_write(p, 0x04, enckey, enckeysize);
|
||||
|
||||
@ -1048,14 +1209,9 @@ static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* copy key blob and set header values */
|
||||
/* copy key blob */
|
||||
memcpy(keybuf, rep_pl->data, rep_pl->data_len);
|
||||
*keybufsize = rep_pl->data_len;
|
||||
kb = (struct ep11keyblob *)keybuf;
|
||||
kb->head.type = TOKTYPE_NON_CCA;
|
||||
kb->head.len = rep_pl->data_len;
|
||||
kb->head.version = TOKVER_EP11_AES;
|
||||
kb->head.keybitlen = keybitsize;
|
||||
|
||||
out:
|
||||
kfree(req);
|
||||
@ -1064,10 +1220,46 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int ep11_wrapkey(u16 card, u16 domain,
|
||||
const u8 *key, size_t keysize,
|
||||
u32 mech, const u8 *iv,
|
||||
u8 *databuf, size_t *datasize)
|
||||
static int ep11_unwrapkey(u16 card, u16 domain,
|
||||
const u8 *kek, size_t keksize,
|
||||
const u8 *enckey, size_t enckeysize,
|
||||
u32 mech, const u8 *iv,
|
||||
u32 keybitsize, u32 keygenflags,
|
||||
u8 *keybuf, size_t *keybufsize,
|
||||
u8 keybufver)
|
||||
{
|
||||
struct ep11kblob_header *hdr;
|
||||
size_t hdr_size, pl_size;
|
||||
u8 *pl;
|
||||
int rc;
|
||||
|
||||
rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
|
||||
&hdr, &hdr_size, &pl, &pl_size);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
|
||||
mech, iv, keybitsize, keygenflags,
|
||||
pl, &pl_size);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
*keybufsize = hdr_size + pl_size;
|
||||
|
||||
/* update header information */
|
||||
hdr = (struct ep11kblob_header *)keybuf;
|
||||
hdr->type = TOKTYPE_NON_CCA;
|
||||
hdr->len = *keybufsize;
|
||||
hdr->version = keybufver;
|
||||
hdr->bitlen = keybitsize;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _ep11_wrapkey(u16 card, u16 domain,
|
||||
const u8 *key, size_t keysize,
|
||||
u32 mech, const u8 *iv,
|
||||
u8 *databuf, size_t *datasize)
|
||||
{
|
||||
struct wk_req_pl {
|
||||
struct pl_head head;
|
||||
@ -1097,20 +1289,10 @@ static int ep11_wrapkey(u16 card, u16 domain,
|
||||
struct ep11_cprb *req = NULL, *rep = NULL;
|
||||
struct ep11_target_dev target;
|
||||
struct ep11_urb *urb = NULL;
|
||||
struct ep11keyblob *kb;
|
||||
size_t req_pl_size;
|
||||
int api, rc = -ENOMEM;
|
||||
bool has_header = false;
|
||||
u8 *p;
|
||||
|
||||
/* maybe the session field holds a header with key info */
|
||||
kb = (struct ep11keyblob *)key;
|
||||
if (kb->head.type == TOKTYPE_NON_CCA &&
|
||||
kb->head.version == TOKVER_EP11_AES) {
|
||||
has_header = true;
|
||||
keysize = min_t(size_t, kb->head.len, keysize);
|
||||
}
|
||||
|
||||
/* request cprb and payload */
|
||||
req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
|
||||
+ ASN1TAGLEN(keysize) + 4;
|
||||
@ -1120,7 +1302,8 @@ static int ep11_wrapkey(u16 card, u16 domain,
|
||||
if (!mech || mech == 0x80060001)
|
||||
req->flags |= 0x20; /* CPACF_WRAP needs special bit */
|
||||
req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req));
|
||||
api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
|
||||
api = (!mech || mech == 0x80060001) ? /* CKM_IBM_CPACF_WRAP */
|
||||
EP11_API_V4 : EP11_API_V1;
|
||||
prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
|
||||
req_pl->var_tag = 0x04;
|
||||
req_pl->var_len = sizeof(u32);
|
||||
@ -1135,11 +1318,6 @@ static int ep11_wrapkey(u16 card, u16 domain,
|
||||
}
|
||||
/* key blob */
|
||||
p += asn1tag_write(p, 0x04, key, keysize);
|
||||
/* maybe the key argument needs the head data cleaned out */
|
||||
if (has_header) {
|
||||
kb = (struct ep11keyblob *)(p - keysize);
|
||||
memset(&kb->head, 0, sizeof(kb->head));
|
||||
}
|
||||
/* empty kek tag */
|
||||
*p++ = 0x04;
|
||||
*p++ = 0;
|
||||
@ -1198,10 +1376,10 @@ out:
|
||||
}
|
||||
|
||||
int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
|
||||
const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
|
||||
u32 keytype)
|
||||
{
|
||||
int rc;
|
||||
struct ep11keyblob *kb;
|
||||
u8 encbuf[64], *kek = NULL;
|
||||
size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
|
||||
|
||||
@ -1223,17 +1401,15 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
}
|
||||
|
||||
/* Step 1: generate AES 256 bit random kek key */
|
||||
rc = ep11_genaeskey(card, domain, 256,
|
||||
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
|
||||
kek, &keklen);
|
||||
rc = _ep11_genaeskey(card, domain, 256,
|
||||
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
|
||||
kek, &keklen);
|
||||
if (rc) {
|
||||
DEBUG_ERR(
|
||||
"%s generate kek key failed, rc=%d\n",
|
||||
__func__, rc);
|
||||
goto out;
|
||||
}
|
||||
kb = (struct ep11keyblob *)kek;
|
||||
memset(&kb->head, 0, sizeof(kb->head));
|
||||
|
||||
/* Step 2: encrypt clear key value with the kek key */
|
||||
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
|
||||
@ -1248,7 +1424,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
|
||||
/* Step 3: import the encrypted key value as a new key */
|
||||
rc = ep11_unwrapkey(card, domain, kek, keklen,
|
||||
encbuf, encbuflen, 0, def_iv,
|
||||
keybitsize, 0, keybuf, keybufsize);
|
||||
keybitsize, 0, keybuf, keybufsize, keytype);
|
||||
if (rc) {
|
||||
DEBUG_ERR(
|
||||
"%s importing key value as new key failed,, rc=%d\n",
|
||||
@ -1262,11 +1438,12 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL(ep11_clr2keyblob);
|
||||
|
||||
int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
|
||||
int ep11_kblob2protkey(u16 card, u16 dom,
|
||||
const u8 *keyblob, size_t keybloblen,
|
||||
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
|
||||
{
|
||||
int rc = -EIO;
|
||||
u8 *wkbuf = NULL;
|
||||
struct ep11kblob_header *hdr;
|
||||
struct ep11keyblob *key;
|
||||
size_t wkbuflen, keylen;
|
||||
struct wk_info {
|
||||
u16 version;
|
||||
@ -1277,31 +1454,17 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
|
||||
u8 res2[8];
|
||||
u8 pkey[];
|
||||
} __packed * wki;
|
||||
const u8 *key;
|
||||
struct ep11kblob_header *hdr;
|
||||
u8 *wkbuf = NULL;
|
||||
int rc = -EIO;
|
||||
|
||||
/* key with or without header ? */
|
||||
hdr = (struct ep11kblob_header *)keyblob;
|
||||
if (hdr->type == TOKTYPE_NON_CCA &&
|
||||
(hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
|
||||
hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
|
||||
is_ep11_keyblob(keyblob + sizeof(struct ep11kblob_header))) {
|
||||
/* EP11 AES or ECC key with header */
|
||||
key = keyblob + sizeof(struct ep11kblob_header);
|
||||
keylen = hdr->len - sizeof(struct ep11kblob_header);
|
||||
} else if (hdr->type == TOKTYPE_NON_CCA &&
|
||||
hdr->version == TOKVER_EP11_AES &&
|
||||
is_ep11_keyblob(keyblob)) {
|
||||
/* EP11 AES key (old style) */
|
||||
key = keyblob;
|
||||
keylen = hdr->len;
|
||||
} else if (is_ep11_keyblob(keyblob)) {
|
||||
/* raw EP11 key blob */
|
||||
key = keyblob;
|
||||
keylen = keybloblen;
|
||||
} else {
|
||||
if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen))
|
||||
return -EINVAL;
|
||||
|
||||
if (hdr->version == TOKVER_EP11_AES) {
|
||||
/* wipe overlayed header */
|
||||
memset(hdr, 0, sizeof(*hdr));
|
||||
}
|
||||
/* !!! hdr is no longer a valid header !!! */
|
||||
|
||||
/* alloc temp working buffer */
|
||||
wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
|
||||
@ -1310,8 +1473,8 @@ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, size_t keybloblen,
|
||||
return -ENOMEM;
|
||||
|
||||
/* ep11 secure key -> protected key + info */
|
||||
rc = ep11_wrapkey(card, dom, key, keylen,
|
||||
0, def_iv, wkbuf, &wkbuflen);
|
||||
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
|
||||
0, def_iv, wkbuf, &wkbuflen);
|
||||
if (rc) {
|
||||
DEBUG_ERR(
|
||||
"%s rewrapping ep11 key to pkey failed, rc=%d\n",
|
||||
|
@@ -12,7 +12,9 @@
#include <asm/zcrypt.h>
#include <asm/pkey.h>

#define EP11_API_V 4 /* highest known and supported EP11 API version */
#define EP11_API_V1 1 /* min EP11 API, default if no higher api required */
#define EP11_API_V4 4 /* supported EP11 API for the ep11misc cprbs */
#define EP11_API_V6 6 /* min EP11 API for some cprbs in SE environment */
#define EP11_STRUCT_MAGIC 0x1234
#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000

@@ -29,14 +31,7 @@ struct ep11keyblob {
union {
u8 session[32];
/* only used for PKEY_TYPE_EP11: */
struct {
u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
u8 res0; /* unused */
u16 len; /* total length in bytes of this blob */
u8 version; /* 0x03 (TOKVER_EP11_AES) */
u8 res1; /* unused */
u16 keybitlen; /* clear key bit len, 0 for unknown */
} head;
struct ep11kblob_header head;
};
u8 wkvp[16]; /* wrapping key verification pattern */
u64 attr; /* boolean key attributes */
@@ -55,6 +50,12 @@ static inline bool is_ep11_keyblob(const u8 *key)
return (kb->version == EP11_STRUCT_MAGIC);
}

/*
* For valid ep11 keyblobs, returns a reference to the wrappingkey verification
* pattern. Otherwise NULL.
*/
const u8 *ep11_kb_wkvp(const u8 *kblob, size_t kbloblen);

/*
* Simple check if the key blob is a valid EP11 AES key blob with header.
* If checkcpacfexport is enabled, the key is also checked for the
@@ -114,13 +115,14 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
* Generate (random) EP11 AES secure key.
*/
int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
u8 *keybuf, size_t *keybufsize);
u8 *keybuf, size_t *keybufsize, u32 keybufver);

/*
* Generate EP11 AES secure key with given clear key value.
*/
int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
const u8 *clrkey, u8 *keybuf, size_t *keybufsize,
u32 keytype);

/*
* Build a list of ep11 apqns meeting the following constrains: