mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-15 08:44:14 +08:00
commit 1e79a9e3ab

Merge tag 'kvm-s390-next-6.2-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- Second batch of the lazy destroy patches
- First batch of KVM changes for kernel virtual != physical address support
- Removal of an unused function
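As a quick orientation for the UAPI added below: the new asynchronous teardown commands are gated behind a capability, which userspace would probe roughly as in this sketch (not part of the series; it assumes kernel headers that already define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE and a VM file descriptor vm_fd):

```c
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns true if this VM supports the asynchronous protected-VM teardown commands. */
static bool has_async_pv_disable(int vm_fd)
{
	return ioctl(vm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_S390_PROTECTED_ASYNC_DISABLE) > 0;
}
```

The capability reports true only when the async_destroy module parameter is enabled and the host runs with protected virtualization, mirroring the check_extension change further down in the diff.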
@@ -5163,10 +5163,13 @@ KVM_PV_ENABLE
   =====      =============================

 KVM_PV_DISABLE
-  Deregister the VM from the Ultravisor and reclaim the memory that
-  had been donated to the Ultravisor, making it usable by the kernel
-  again. All registered VCPUs are converted back to non-protected
-  ones.
+  Deregister the VM from the Ultravisor and reclaim the memory that had
+  been donated to the Ultravisor, making it usable by the kernel again.
+  All registered VCPUs are converted back to non-protected ones. If a
+  previous protected VM had been prepared for asynchronous teardown with
+  KVM_PV_ASYNC_CLEANUP_PREPARE and not subsequently torn down with
+  KVM_PV_ASYNC_CLEANUP_PERFORM, it will be torn down in this call
+  together with the current protected VM.

 KVM_PV_VM_SET_SEC_PARMS
   Pass the image header from VM memory to the Ultravisor in
@@ -5289,6 +5292,36 @@ KVM_PV_DUMP
   authentication tag all of which are needed to decrypt the dump at a
   later time.

+KVM_PV_ASYNC_CLEANUP_PREPARE
+  :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+  Prepare the current protected VM for asynchronous teardown. Most
+  resources used by the current protected VM will be set aside for a
+  subsequent asynchronous teardown. The current protected VM will then
+  resume execution immediately as non-protected. There can be at most
+  one protected VM prepared for asynchronous teardown at any time. If
+  a protected VM had already been prepared for teardown without
+  subsequently calling KVM_PV_ASYNC_CLEANUP_PERFORM, this call will
+  fail. In that case, the userspace process should issue a normal
+  KVM_PV_DISABLE. The resources set aside with this call will need to
+  be cleaned up with a subsequent call to KVM_PV_ASYNC_CLEANUP_PERFORM
+  or KVM_PV_DISABLE, otherwise they will be cleaned up when KVM
+  terminates. KVM_PV_ASYNC_CLEANUP_PREPARE can be called again as soon
+  as cleanup starts, i.e. before KVM_PV_ASYNC_CLEANUP_PERFORM finishes.
+
+KVM_PV_ASYNC_CLEANUP_PERFORM
+  :Capability: KVM_CAP_S390_PROTECTED_ASYNC_DISABLE
+
+  Tear down the protected VM previously prepared for teardown with
+  KVM_PV_ASYNC_CLEANUP_PREPARE. The resources that had been set aside
+  will be freed during the execution of this command. This PV command
+  should ideally be issued by userspace from a separate thread. If a
+  fatal signal is received (or the process terminates naturally), the
+  command will terminate immediately without completing, and the normal
+  KVM shutdown procedure will take care of cleaning up all remaining
+  protected VMs, including the ones whose teardown was interrupted by
+  process termination.
+
 4.126 KVM_XEN_HVM_SET_ATTR
 --------------------------

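To make the documented flow concrete, here is a minimal userspace sketch (not part of the patch; names like teardown_protected_vm are illustrative, error handling is trimmed, and it assumes a VM file descriptor vm_fd plus headers carrying the new command codes and that the capability above was reported as available):

```c
#include <pthread.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Tear down the set-aside VM; this can take a while, so run it off the main thread. */
static void *async_teardown(void *arg)
{
	int vm_fd = (int)(long)arg;
	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ASYNC_CLEANUP_PERFORM };

	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
		fprintf(stderr, "ASYNC_CLEANUP_PERFORM failed: rc=%x rrc=%x\n",
			cmd.rc, cmd.rrc);
	return NULL;
}

/*
 * Replace a plain KVM_PV_DISABLE with the asynchronous variant.
 * Assumes KVM_CAP_S390_PROTECTED_ASYNC_DISABLE was already checked.
 */
static int teardown_protected_vm(int vm_fd, pthread_t *tid)
{
	struct kvm_pv_cmd prep = { .cmd = KVM_PV_ASYNC_CLEANUP_PREPARE };
	struct kvm_pv_cmd dis = { .cmd = KVM_PV_DISABLE };

	/* Set the current protected VM aside; the guest resumes as non-protected. */
	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &prep) == 0)
		/* The expensive cleanup now runs concurrently with the guest. */
		return pthread_create(tid, NULL, async_teardown,
				      (void *)(long)vm_fd);

	/* e.g. another VM is already set aside: fall back to a synchronous disable. */
	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &dis);
}
```

Issuing the PERFORM step from its own thread is what lets the rebooted, now non-protected guest keep running while the Ultravisor reclaims the donated memory.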
@@ -142,8 +142,7 @@ struct mcck_volatile_info {
 			      CR14_EXTERNAL_DAMAGE_SUBMASK)

 #define SIDAD_SIZE_MASK		0xff
-#define sida_origin(sie_block) \
-	((sie_block)->sidad & PAGE_MASK)
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
 #define sida_size(sie_block) \
 	((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)

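The change above is representative of the whole series: fields consumed by hardware or the Ultravisor (such as sidad) now hold physical addresses, and the kernel converts at the access boundary with phys_to_virt()/virt_to_phys() instead of assuming the two are equal. A minimal, self-contained sketch of that pattern (illustrative only; struct hw_desc and its field are made up):

```c
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/io.h>		/* virt_to_phys(), phys_to_virt() */
#include <linux/types.h>

/* Hypothetical descriptor whose buffer field is read by hardware as a physical address. */
struct hw_desc {
	u64 buf_origin;
};

static int hw_desc_init(struct hw_desc *d)
{
	void *buf = (void *)get_zeroed_page(GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* Store what the hardware expects: a physical address. */
	d->buf_origin = virt_to_phys(buf);
	return 0;
}

static void hw_desc_free(struct hw_desc *d)
{
	/* Convert back to a kernel virtual address before handing it to the allocator. */
	free_page((unsigned long)phys_to_virt(d->buf_origin));
	d->buf_origin = 0;
}
```

This mirrors what the new sida_addr() macro and the sidad allocation in kvm_s390_pv_create_cpu() do later in this diff.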
@ -276,6 +275,7 @@ struct kvm_s390_sie_block {
|
||||
#define ECB3_AES 0x04
|
||||
#define ECB3_RI 0x01
|
||||
__u8 ecb3; /* 0x0063 */
|
||||
#define ESCA_SCAOL_MASK ~0x3fU
|
||||
__u32 scaol; /* 0x0064 */
|
||||
__u8 sdf; /* 0x0068 */
|
||||
__u8 epdx; /* 0x0069 */
|
||||
@ -942,6 +942,8 @@ struct kvm_s390_pv {
|
||||
unsigned long stor_base;
|
||||
void *stor_var;
|
||||
bool dumping;
|
||||
void *set_aside;
|
||||
struct list_head need_cleanup;
|
||||
struct mmu_notifier mmu_notifier;
|
||||
};
|
||||
|
||||
@ -1017,7 +1019,13 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm);
|
||||
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
|
||||
unsigned long *aqm, unsigned long *adm);
|
||||
|
||||
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
|
||||
int __sie64a(phys_addr_t sie_block_phys, struct kvm_s390_sie_block *sie_block, u64 *rsa);
|
||||
|
||||
static inline int sie64a(struct kvm_s390_sie_block *sie_block, u64 *rsa)
|
||||
{
|
||||
return __sie64a(virt_to_phys(sie_block), sie_block, rsa);
|
||||
}
|
||||
|
||||
extern char sie_exit;
|
||||
|
||||
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
|
||||
|
@ -4,8 +4,8 @@
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
int set_memory_encrypted(unsigned long addr, int numpages);
|
||||
int set_memory_decrypted(unsigned long addr, int numpages);
|
||||
int set_memory_encrypted(unsigned long vaddr, int numpages);
|
||||
int set_memory_decrypted(unsigned long vaddr, int numpages);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
@ -46,6 +46,7 @@ struct stack_frame {
|
||||
unsigned long sie_savearea;
|
||||
unsigned long sie_reason;
|
||||
unsigned long sie_flags;
|
||||
unsigned long sie_control_block_phys;
|
||||
};
|
||||
};
|
||||
unsigned long gprs[10];
|
||||
|
@ -34,6 +34,7 @@
|
||||
#define UVC_CMD_INIT_UV 0x000f
|
||||
#define UVC_CMD_CREATE_SEC_CONF 0x0100
|
||||
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
|
||||
#define UVC_CMD_DESTROY_SEC_CONF_FAST 0x0102
|
||||
#define UVC_CMD_CREATE_SEC_CPU 0x0120
|
||||
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
|
||||
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
|
||||
@ -81,6 +82,7 @@ enum uv_cmds_inst {
|
||||
BIT_UVC_CMD_UNSHARE_ALL = 20,
|
||||
BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
|
||||
BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
|
||||
BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
|
||||
BIT_UVC_CMD_DUMP_INIT = 24,
|
||||
BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
|
||||
BIT_UVC_CMD_DUMP_CPU = 26,
|
||||
@ -230,6 +232,14 @@ struct uv_cb_nodata {
|
||||
u64 reserved20[4];
|
||||
} __packed __aligned(8);
|
||||
|
||||
/* Destroy Configuration Fast */
|
||||
struct uv_cb_destroy_fast {
|
||||
struct uv_cb_header header;
|
||||
u64 reserved08[2];
|
||||
u64 handle;
|
||||
u64 reserved20[5];
|
||||
} __packed __aligned(8);
|
||||
|
||||
/* Set Shared Access */
|
||||
struct uv_cb_share {
|
||||
struct uv_cb_header header;
|
||||
|
@ -62,6 +62,7 @@ int main(void)
|
||||
OFFSET(__SF_SIE_SAVEAREA, stack_frame, sie_savearea);
|
||||
OFFSET(__SF_SIE_REASON, stack_frame, sie_reason);
|
||||
OFFSET(__SF_SIE_FLAGS, stack_frame, sie_flags);
|
||||
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
|
||||
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
|
||||
BLANK();
|
||||
/* idle data offsets */
|
||||
|
@ -225,18 +225,20 @@ ENDPROC(__switch_to)
|
||||
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
/*
|
||||
* sie64a calling convention:
|
||||
* %r2 pointer to sie control block
|
||||
* %r3 guest register save area
|
||||
* __sie64a calling convention:
|
||||
* %r2 pointer to sie control block phys
|
||||
* %r3 pointer to sie control block virt
|
||||
* %r4 guest register save area
|
||||
*/
|
||||
ENTRY(sie64a)
|
||||
ENTRY(__sie64a)
|
||||
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
|
||||
lg %r12,__LC_CURRENT
|
||||
stg %r2,__SF_SIE_CONTROL(%r15) # save control block pointer
|
||||
stg %r3,__SF_SIE_SAVEAREA(%r15) # save guest register save area
|
||||
stg %r2,__SF_SIE_CONTROL_PHYS(%r15) # save sie block physical..
|
||||
stg %r3,__SF_SIE_CONTROL(%r15) # ...and virtual addresses
|
||||
stg %r4,__SF_SIE_SAVEAREA(%r15) # save guest register save area
|
||||
xc __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
|
||||
mvc __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
|
||||
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
|
||||
lmg %r0,%r13,0(%r4) # load guest gprs 0-13
|
||||
lg %r14,__LC_GMAP # get gmap pointer
|
||||
ltgr %r14,%r14
|
||||
jz .Lsie_gmap
|
||||
@ -248,6 +250,7 @@ ENTRY(sie64a)
|
||||
jnz .Lsie_skip
|
||||
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
|
||||
jo .Lsie_skip # exit if fp/vx regs changed
|
||||
lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr
|
||||
BPEXIT __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
|
||||
.Lsie_entry:
|
||||
sie 0(%r14)
|
||||
@ -258,13 +261,14 @@ ENTRY(sie64a)
|
||||
BPOFF
|
||||
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
|
||||
.Lsie_skip:
|
||||
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
|
||||
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
|
||||
lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce
|
||||
.Lsie_done:
|
||||
# some program checks are suppressing. C code (e.g. do_protection_exception)
|
||||
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
|
||||
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
|
||||
# Other instructions between sie64a and .Lsie_done should not cause program
|
||||
# Other instructions between __sie64a and .Lsie_done should not cause program
|
||||
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
|
||||
.Lrewind_pad6:
|
||||
nopr 7
|
||||
@ -293,8 +297,8 @@ sie_exit:
|
||||
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
|
||||
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
|
||||
EX_TABLE(sie_exit,.Lsie_fault)
|
||||
ENDPROC(sie64a)
|
||||
EXPORT_SYMBOL(sie64a)
|
||||
ENDPROC(__sie64a)
|
||||
EXPORT_SYMBOL(__sie64a)
|
||||
EXPORT_SYMBOL(sie_exit)
|
||||
#endif
|
||||
|
||||
@ -373,7 +377,7 @@ ENTRY(pgm_check_handler)
|
||||
j 3f # -> fault in user space
|
||||
.Lpgm_skip_asce:
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
# cleanup critical section for program checks in sie64a
|
||||
# cleanup critical section for program checks in __sie64a
|
||||
OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
|
||||
SIEEXIT
|
||||
lghi %r10,_PIF_GUEST_FAULT
|
||||
|
@ -255,6 +255,13 @@ static int make_secure_pte(pte_t *ptep, unsigned long addr,
|
||||
*/
|
||||
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* The misc feature indicates, among other things, that importing a
|
||||
* shared page from a different protected VM will automatically also
|
||||
* transfer its ownership.
|
||||
*/
|
||||
if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
|
||||
return false;
|
||||
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
|
||||
return false;
|
||||
return atomic_read(&mm->context.protected_count) > 1;
|
||||
|
@ -217,7 +217,7 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
if (current->thread.per_flags & PER_FLAG_NO_TE)
|
||||
return 0;
|
||||
itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
|
||||
itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
|
||||
rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
|
||||
if (rc)
|
||||
return rc;
|
||||
@ -409,8 +409,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
|
||||
out:
|
||||
if (!cc) {
|
||||
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
|
||||
memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
|
||||
sctns, PAGE_SIZE);
|
||||
memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
|
||||
} else {
|
||||
r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
|
||||
if (r) {
|
||||
@ -464,7 +463,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
|
||||
|
||||
static int handle_pv_spx(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
|
||||
u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
|
||||
|
||||
kvm_s390_set_prefix(vcpu, pref);
|
||||
trace_kvm_s390_handle_prefix(vcpu, 1, pref);
|
||||
@ -497,7 +496,7 @@ static int handle_pv_sclp(struct kvm_vcpu *vcpu)
|
||||
|
||||
static int handle_pv_uvc(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
|
||||
struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
|
||||
struct uv_cb_cts uvcb = {
|
||||
.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
|
||||
.header.len = sizeof(uvcb),
|
||||
|
@ -314,11 +314,6 @@ static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
|
||||
return READ_ONCE(gisa->ipm);
|
||||
}
|
||||
|
||||
static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
|
||||
{
|
||||
clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
|
||||
}
|
||||
|
||||
static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
|
||||
{
|
||||
return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
|
||||
|
@ -209,6 +209,14 @@ unsigned int diag9c_forwarding_hz;
|
||||
module_param(diag9c_forwarding_hz, uint, 0644);
|
||||
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
|
||||
|
||||
/*
|
||||
* allow asynchronous deinit for protected guests; enable by default since
|
||||
* the feature is opt-in anyway
|
||||
*/
|
||||
static int async_destroy = 1;
|
||||
module_param(async_destroy, int, 0444);
|
||||
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");
|
||||
|
||||
/*
|
||||
* For now we handle at most 16 double words as this is what the s390 base
|
||||
* kernel handles and stores in the prefix page. If we ever need to go beyond
|
||||
@ -616,6 +624,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
case KVM_CAP_S390_BPB:
|
||||
r = test_facility(82);
|
||||
break;
|
||||
case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
|
||||
r = async_destroy && is_prot_virt_host();
|
||||
break;
|
||||
case KVM_CAP_S390_PROTECTED:
|
||||
r = is_prot_virt_host();
|
||||
break;
|
||||
@ -2519,9 +2530,13 @@ static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
|
||||
|
||||
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
|
||||
{
|
||||
const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
|
||||
void __user *argp = (void __user *)cmd->data;
|
||||
int r = 0;
|
||||
u16 dummy;
|
||||
void __user *argp = (void __user *)cmd->data;
|
||||
|
||||
if (need_lock)
|
||||
mutex_lock(&kvm->lock);
|
||||
|
||||
switch (cmd->cmd) {
|
||||
case KVM_PV_ENABLE: {
|
||||
@ -2555,6 +2570,31 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
|
||||
set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
|
||||
break;
|
||||
}
|
||||
case KVM_PV_ASYNC_CLEANUP_PREPARE:
|
||||
r = -EINVAL;
|
||||
if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
|
||||
break;
|
||||
|
||||
r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
|
||||
/*
|
||||
* If a CPU could not be destroyed, destroy VM will also fail.
|
||||
* There is no point in trying to destroy it. Instead return
|
||||
* the rc and rrc from the first CPU that failed destroying.
|
||||
*/
|
||||
if (r)
|
||||
break;
|
||||
r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
|
||||
|
||||
/* no need to block service interrupts any more */
|
||||
clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
|
||||
break;
|
||||
case KVM_PV_ASYNC_CLEANUP_PERFORM:
|
||||
r = -EINVAL;
|
||||
if (!async_destroy)
|
||||
break;
|
||||
/* kvm->lock must not be held; this is asserted inside the function. */
|
||||
r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
|
||||
break;
|
||||
case KVM_PV_DISABLE: {
|
||||
r = -EINVAL;
|
||||
if (!kvm_s390_pv_is_protected(kvm))
|
||||
@ -2568,7 +2608,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
|
||||
*/
|
||||
if (r)
|
||||
break;
|
||||
r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
|
||||
r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
|
||||
|
||||
/* no need to block service interrupts any more */
|
||||
clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
|
||||
@ -2718,6 +2758,9 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
|
||||
default:
|
||||
r = -ENOTTY;
|
||||
}
|
||||
if (need_lock)
|
||||
mutex_unlock(&kvm->lock);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -2922,9 +2965,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
|
||||
r = -EINVAL;
|
||||
break;
|
||||
}
|
||||
mutex_lock(&kvm->lock);
|
||||
/* must be called without kvm->lock */
|
||||
r = kvm_s390_handle_pv(kvm, &args);
|
||||
mutex_unlock(&kvm->lock);
|
||||
if (copy_to_user(argp, &args, sizeof(args))) {
|
||||
r = -EFAULT;
|
||||
break;
|
||||
@ -3243,6 +3285,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
||||
kvm_s390_vsie_init(kvm);
|
||||
if (use_gisa)
|
||||
kvm_s390_gisa_init(kvm);
|
||||
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
|
||||
kvm->arch.pv.set_aside = NULL;
|
||||
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
|
||||
|
||||
return 0;
|
||||
@ -3287,11 +3331,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
|
||||
/*
|
||||
* We are already at the end of life and kvm->lock is not taken.
|
||||
* This is ok as the file descriptor is closed by now and nobody
|
||||
* can mess with the pv state. To avoid lockdep_assert_held from
|
||||
* complaining we do not use kvm_s390_pv_is_protected.
|
||||
* can mess with the pv state.
|
||||
*/
|
||||
if (kvm_s390_pv_get_handle(kvm))
|
||||
kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
|
||||
kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
|
||||
/*
|
||||
* Remove the mmu notifier only when the whole KVM VM is torn down,
|
||||
* and only if one was registered to begin with. If the VM is
|
||||
@ -3344,28 +3386,30 @@ static void sca_del_vcpu(struct kvm_vcpu *vcpu)
|
||||
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!kvm_s390_use_sca_entries()) {
|
||||
struct bsca_block *sca = vcpu->kvm->arch.sca;
|
||||
phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
|
||||
|
||||
/* we still need the basic sca for the ipte control */
|
||||
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
|
||||
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
|
||||
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
|
||||
vcpu->arch.sie_block->scaol = sca_phys;
|
||||
return;
|
||||
}
|
||||
read_lock(&vcpu->kvm->arch.sca_lock);
|
||||
if (vcpu->kvm->arch.use_esca) {
|
||||
struct esca_block *sca = vcpu->kvm->arch.sca;
|
||||
phys_addr_t sca_phys = virt_to_phys(sca);
|
||||
|
||||
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
|
||||
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
|
||||
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
|
||||
sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
|
||||
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
|
||||
vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
|
||||
vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
|
||||
set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
|
||||
} else {
|
||||
struct bsca_block *sca = vcpu->kvm->arch.sca;
|
||||
phys_addr_t sca_phys = virt_to_phys(sca);
|
||||
|
||||
sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
|
||||
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
|
||||
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
|
||||
sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
|
||||
vcpu->arch.sie_block->scaoh = sca_phys >> 32;
|
||||
vcpu->arch.sie_block->scaol = sca_phys;
|
||||
set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
|
||||
}
|
||||
read_unlock(&vcpu->kvm->arch.sca_lock);
|
||||
@ -3396,6 +3440,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
|
||||
struct kvm_vcpu *vcpu;
|
||||
unsigned long vcpu_idx;
|
||||
u32 scaol, scaoh;
|
||||
phys_addr_t new_sca_phys;
|
||||
|
||||
if (kvm->arch.use_esca)
|
||||
return 0;
|
||||
@ -3404,8 +3449,9 @@ static int sca_switch_to_extended(struct kvm *kvm)
|
||||
if (!new_sca)
|
||||
return -ENOMEM;
|
||||
|
||||
scaoh = (u32)((u64)(new_sca) >> 32);
|
||||
scaol = (u32)(u64)(new_sca) & ~0x3fU;
|
||||
new_sca_phys = virt_to_phys(new_sca);
|
||||
scaoh = new_sca_phys >> 32;
|
||||
scaol = new_sca_phys & ESCA_SCAOL_MASK;
|
||||
|
||||
kvm_s390_vcpu_block_all(kvm);
|
||||
write_lock(&kvm->arch.sca_lock);
|
||||
@ -3625,15 +3671,18 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
|
||||
|
||||
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
free_page(vcpu->arch.sie_block->cbrlo);
|
||||
free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
|
||||
vcpu->arch.sie_block->cbrlo = 0;
|
||||
}
|
||||
|
||||
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
|
||||
if (!vcpu->arch.sie_block->cbrlo)
|
||||
void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
|
||||
|
||||
if (!cbrlo_page)
|
||||
return -ENOMEM;
|
||||
|
||||
vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3643,7 +3692,7 @@ static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
|
||||
|
||||
vcpu->arch.sie_block->ibc = model->ibc;
|
||||
if (test_kvm_facility(vcpu->kvm, 7))
|
||||
vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
|
||||
vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
|
||||
}
|
||||
|
||||
static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
|
||||
@ -3700,9 +3749,8 @@ static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
|
||||
VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
|
||||
vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
|
||||
}
|
||||
vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
|
||||
| SDNXC;
|
||||
vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
|
||||
vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
|
||||
vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
|
||||
|
||||
if (sclp.has_kss)
|
||||
kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
|
||||
@ -3752,7 +3800,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
|
||||
return -ENOMEM;
|
||||
|
||||
vcpu->arch.sie_block = &sie_page->sie_block;
|
||||
vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
|
||||
vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
|
||||
|
||||
/* the real guest size will always be smaller than msl */
|
||||
vcpu->arch.sie_block->mso = 0;
|
||||
@ -5169,6 +5217,7 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
|
||||
struct kvm_s390_mem_op *mop)
|
||||
{
|
||||
void __user *uaddr = (void __user *)mop->buf;
|
||||
void *sida_addr;
|
||||
int r = 0;
|
||||
|
||||
if (mop->flags || !mop->size)
|
||||
@ -5180,16 +5229,16 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
|
||||
if (!kvm_s390_pv_cpu_is_protected(vcpu))
|
||||
return -EINVAL;
|
||||
|
||||
sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
|
||||
|
||||
switch (mop->op) {
|
||||
case KVM_S390_MEMOP_SIDA_READ:
|
||||
if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
|
||||
mop->sida_offset), mop->size))
|
||||
if (copy_to_user(uaddr, sida_addr, mop->size))
|
||||
r = -EFAULT;
|
||||
|
||||
break;
|
||||
case KVM_S390_MEMOP_SIDA_WRITE:
|
||||
if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
|
||||
mop->sida_offset), uaddr, mop->size))
|
||||
if (copy_from_user(sida_addr, uaddr, mop->size))
|
||||
r = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
@ -23,7 +23,8 @@
|
||||
/* Transactional Memory Execution related macros */
|
||||
#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE))
|
||||
#define TDB_FORMAT1 1
|
||||
#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
|
||||
#define IS_ITDB_VALID(vcpu) \
|
||||
((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))
|
||||
|
||||
extern debug_info_t *kvm_s390_dbf;
|
||||
extern debug_info_t *kvm_s390_dbf_uv;
|
||||
@ -233,7 +234,7 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
|
||||
|
||||
static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
|
||||
{
|
||||
u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;
|
||||
u32 gd = virt_to_phys(kvm->arch.gisa_int.origin);
|
||||
|
||||
if (gd && sclp.has_gisaf)
|
||||
gd |= GISA_FORMAT1;
|
||||
@ -243,6 +244,9 @@ static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
|
||||
/* implemented in pv.c */
|
||||
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
|
||||
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
|
||||
|
@ -924,8 +924,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
|
||||
return -EREMOTE;
|
||||
}
|
||||
if (kvm_s390_pv_cpu_is_protected(vcpu)) {
|
||||
memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
|
||||
PAGE_SIZE);
|
||||
memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
|
||||
rc = 0;
|
||||
} else {
|
||||
rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
|
||||
|
@ -18,6 +18,29 @@
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include "kvm-s390.h"
|
||||
|
||||
/**
|
||||
* struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
|
||||
* be destroyed
|
||||
*
|
||||
* @list: list head for the list of leftover VMs
|
||||
* @old_gmap_table: the gmap table of the leftover protected VM
|
||||
* @handle: the handle of the leftover protected VM
|
||||
* @stor_var: pointer to the variable storage of the leftover protected VM
|
||||
* @stor_base: address of the base storage of the leftover protected VM
|
||||
*
|
||||
* Represents a protected VM that is still registered with the Ultravisor,
|
||||
* but which does not correspond any longer to an active KVM VM. It should
|
||||
* be destroyed at some point later, either asynchronously or when the
|
||||
* process terminates.
|
||||
*/
|
||||
struct pv_vm_to_be_destroyed {
|
||||
struct list_head list;
|
||||
unsigned long old_gmap_table;
|
||||
u64 handle;
|
||||
void *stor_var;
|
||||
unsigned long stor_base;
|
||||
};
|
||||
|
||||
static void kvm_s390_clear_pv_state(struct kvm *kvm)
|
||||
{
|
||||
kvm->arch.pv.handle = 0;
|
||||
@ -44,7 +67,7 @@ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
|
||||
free_pages(vcpu->arch.pv.stor_base,
|
||||
get_order(uv_info.guest_cpu_stor_len));
|
||||
|
||||
free_page(sida_origin(vcpu->arch.sie_block));
|
||||
free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
|
||||
vcpu->arch.sie_block->pv_handle_cpu = 0;
|
||||
vcpu->arch.sie_block->pv_handle_config = 0;
|
||||
memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
|
||||
@ -66,6 +89,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
|
||||
.header.cmd = UVC_CMD_CREATE_SEC_CPU,
|
||||
.header.len = sizeof(uvcb),
|
||||
};
|
||||
void *sida_addr;
|
||||
int cc;
|
||||
|
||||
if (kvm_s390_pv_cpu_get_handle(vcpu))
|
||||
@ -79,16 +103,17 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
|
||||
/* Input */
|
||||
uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
|
||||
uvcb.num = vcpu->arch.sie_block->icpua;
|
||||
uvcb.state_origin = (u64)vcpu->arch.sie_block;
|
||||
uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;
|
||||
uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
|
||||
uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);
|
||||
|
||||
/* Alloc Secure Instruction Data Area Designation */
|
||||
vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
|
||||
if (!vcpu->arch.sie_block->sidad) {
|
||||
sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
|
||||
if (!sida_addr) {
|
||||
free_pages(vcpu->arch.pv.stor_base,
|
||||
get_order(uv_info.guest_cpu_stor_len));
|
||||
return -ENOMEM;
|
||||
}
|
||||
vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);
|
||||
|
||||
cc = uv_call(0, (u64)&uvcb);
|
||||
*rc = uvcb.header.rc;
|
||||
@ -159,7 +184,185 @@ out_err:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* this should not fail, but if it does, we must not free the donated memory */
|
||||
/**
|
||||
* kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
|
||||
* @kvm: the KVM that was associated with this leftover protected VM
|
||||
* @leftover: details about the leftover protected VM that needs a clean up
|
||||
* @rc: the RC code of the Destroy Secure Configuration UVC
|
||||
* @rrc: the RRC code of the Destroy Secure Configuration UVC
|
||||
*
|
||||
* Destroy one leftover protected VM.
|
||||
* On success, kvm->mm->context.protected_count will be decremented atomically
|
||||
* and all other resources used by the VM will be freed.
|
||||
*
|
||||
* Return: 0 in case of success, otherwise 1
|
||||
*/
|
||||
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
|
||||
struct pv_vm_to_be_destroyed *leftover,
|
||||
u16 *rc, u16 *rrc)
|
||||
{
|
||||
int cc;
|
||||
|
||||
/* It used the destroy-fast UVC, nothing left to do here */
|
||||
if (!leftover->handle)
|
||||
goto done_fast;
|
||||
cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
|
||||
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
|
||||
WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
|
||||
if (cc)
|
||||
return cc;
|
||||
/*
|
||||
* Intentionally leak unusable memory. If the UVC fails, the memory
|
||||
* used for the VM and its metadata is permanently unusable.
|
||||
* This can only happen in case of a serious KVM or hardware bug; it
|
||||
* is not expected to happen in normal operation.
|
||||
*/
|
||||
free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
|
||||
free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
|
||||
vfree(leftover->stor_var);
|
||||
done_fast:
|
||||
atomic_dec(&kvm->mm->context.protected_count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
|
||||
* @kvm: the VM whose memory is to be cleared.
|
||||
*
|
||||
* Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
|
||||
* The CPUs of the protected VM need to be destroyed beforehand.
|
||||
*/
|
||||
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
|
||||
{
|
||||
const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
|
||||
struct kvm_memory_slot *slot;
|
||||
unsigned long len;
|
||||
int srcu_idx;
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
/* Take the memslot containing guest absolute address 0 */
|
||||
slot = gfn_to_memslot(kvm, 0);
|
||||
/* Clear all slots or parts thereof that are below 2GB */
|
||||
while (slot && slot->base_gfn < pages_2g) {
|
||||
len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
|
||||
s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
|
||||
/* Take the next memslot */
|
||||
slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
|
||||
}
|
||||
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
}
|
||||
|
||||
static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
{
|
||||
struct uv_cb_destroy_fast uvcb = {
|
||||
.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
|
||||
.header.len = sizeof(uvcb),
|
||||
.handle = kvm_s390_pv_get_handle(kvm),
|
||||
};
|
||||
int cc;
|
||||
|
||||
cc = uv_call_sched(0, (u64)&uvcb);
|
||||
if (rc)
|
||||
*rc = uvcb.header.rc;
|
||||
if (rrc)
|
||||
*rrc = uvcb.header.rrc;
|
||||
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
|
||||
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
|
||||
uvcb.header.rc, uvcb.header.rrc);
|
||||
WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
|
||||
kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
|
||||
/* Intended memory leak on "impossible" error */
|
||||
if (!cc)
|
||||
kvm_s390_pv_dealloc_vm(kvm);
|
||||
return cc ? -EIO : 0;
|
||||
}
|
||||
|
||||
static inline bool is_destroy_fast_available(void)
|
||||
{
|
||||
return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
|
||||
* @kvm: the VM
|
||||
* @rc: return value for the RC field of the UVCB
|
||||
* @rrc: return value for the RRC field of the UVCB
|
||||
*
|
||||
* Set aside the protected VM for a subsequent teardown. The VM will be able
|
||||
* to continue immediately as a non-secure VM, and the information needed to
|
||||
* properly tear down the protected VM is set aside. If another protected VM
|
||||
* was already set aside without starting its teardown, this function will
|
||||
* fail.
|
||||
* The CPUs of the protected VM need to be destroyed beforehand.
|
||||
*
|
||||
* Context: kvm->lock needs to be held
|
||||
*
|
||||
* Return: 0 in case of success, -EINVAL if another protected VM was already set
|
||||
* aside, -ENOMEM if the system ran out of memory.
|
||||
*/
|
||||
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
{
|
||||
struct pv_vm_to_be_destroyed *priv;
|
||||
int res = 0;
|
||||
|
||||
lockdep_assert_held(&kvm->lock);
|
||||
/*
|
||||
* If another protected VM was already prepared for teardown, refuse.
|
||||
* A normal deinitialization has to be performed instead.
|
||||
*/
|
||||
if (kvm->arch.pv.set_aside)
|
||||
return -EINVAL;
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (is_destroy_fast_available()) {
|
||||
res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
|
||||
} else {
|
||||
priv->stor_var = kvm->arch.pv.stor_var;
|
||||
priv->stor_base = kvm->arch.pv.stor_base;
|
||||
priv->handle = kvm_s390_pv_get_handle(kvm);
|
||||
priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
|
||||
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
|
||||
if (s390_replace_asce(kvm->arch.gmap))
|
||||
res = -ENOMEM;
|
||||
}
|
||||
|
||||
if (res) {
|
||||
kfree(priv);
|
||||
return res;
|
||||
}
|
||||
|
||||
kvm_s390_destroy_lower_2g(kvm);
|
||||
kvm_s390_clear_pv_state(kvm);
|
||||
kvm->arch.pv.set_aside = priv;
|
||||
|
||||
*rc = UVC_RC_EXECUTED;
|
||||
*rrc = 42;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
|
||||
* @kvm: the KVM whose protected VM needs to be deinitialized
|
||||
* @rc: the RC code of the UVC
|
||||
* @rrc: the RRC code of the UVC
|
||||
*
|
||||
* Deinitialize the current protected VM. This function will destroy and
|
||||
* cleanup the current protected VM, but it will not cleanup the guest
|
||||
* memory. This function should only be called when the protected VM has
|
||||
* just been created and therefore does not have any guest memory, or when
|
||||
* the caller cleans up the guest memory separately.
|
||||
*
|
||||
* This function should not fail, but if it does, the donated memory must
|
||||
* not be freed.
|
||||
*
|
||||
* Context: kvm->lock needs to be held
|
||||
*
|
||||
* Return: 0 in case of success, otherwise -EIO
|
||||
*/
|
||||
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
{
|
||||
int cc;
|
||||
@ -167,15 +370,6 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
|
||||
UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
|
||||
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
|
||||
/*
|
||||
* if the mm still has a mapping, make all its pages accessible
|
||||
* before destroying the guest
|
||||
*/
|
||||
if (mmget_not_zero(kvm->mm)) {
|
||||
s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
|
||||
mmput(kvm->mm);
|
||||
}
|
||||
|
||||
if (!cc) {
|
||||
atomic_dec(&kvm->mm->context.protected_count);
|
||||
kvm_s390_pv_dealloc_vm(kvm);
|
||||
@ -189,11 +383,137 @@ int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
return cc ? -EIO : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated
|
||||
* with a specific KVM.
|
||||
* @kvm: the KVM to be cleaned up
|
||||
* @rc: the RC code of the first failing UVC
|
||||
* @rrc: the RRC code of the first failing UVC
|
||||
*
|
||||
* This function will clean up all protected VMs associated with a KVM.
|
||||
* This includes the active one, the one prepared for deinitialization with
|
||||
* kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
|
||||
*
|
||||
* Context: kvm->lock needs to be held unless being called from
|
||||
* kvm_arch_destroy_vm.
|
||||
*
|
||||
* Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
|
||||
*/
|
||||
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
{
|
||||
struct pv_vm_to_be_destroyed *cur;
|
||||
bool need_zap = false;
|
||||
u16 _rc, _rrc;
|
||||
int cc = 0;
|
||||
|
||||
/* Make sure the counter does not reach 0 before calling s390_uv_destroy_range */
|
||||
atomic_inc(&kvm->mm->context.protected_count);
|
||||
|
||||
*rc = 1;
|
||||
/* If the current VM is protected, destroy it */
|
||||
if (kvm_s390_pv_get_handle(kvm)) {
|
||||
cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
|
||||
need_zap = true;
|
||||
}
|
||||
|
||||
/* If a previous protected VM was set aside, put it in the need_cleanup list */
|
||||
if (kvm->arch.pv.set_aside) {
|
||||
list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
|
||||
kvm->arch.pv.set_aside = NULL;
|
||||
}
|
||||
|
||||
/* Cleanup all protected VMs in the need_cleanup list */
|
||||
while (!list_empty(&kvm->arch.pv.need_cleanup)) {
|
||||
cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
|
||||
need_zap = true;
|
||||
if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
|
||||
cc = 1;
|
||||
/*
|
||||
* Only return the first error rc and rrc, so make
|
||||
* sure it is not overwritten. All destroys will
|
||||
* additionally be reported via KVM_UV_EVENT().
|
||||
*/
|
||||
if (*rc == UVC_RC_EXECUTED) {
|
||||
*rc = _rc;
|
||||
*rrc = _rrc;
|
||||
}
|
||||
}
|
||||
list_del(&cur->list);
|
||||
kfree(cur);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the mm still has a mapping, try to mark all its pages as
|
||||
* accessible. The counter should not reach zero before this
|
||||
* cleanup has been performed.
|
||||
*/
|
||||
if (need_zap && mmget_not_zero(kvm->mm)) {
|
||||
s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
|
||||
mmput(kvm->mm);
|
||||
}
|
||||
|
||||
/* Now the counter can safely reach 0 */
|
||||
atomic_dec(&kvm->mm->context.protected_count);
|
||||
return cc ? -EIO : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
|
||||
* @kvm: the VM previously associated with the protected VM
|
||||
* @rc: return value for the RC field of the UVCB
|
||||
* @rrc: return value for the RRC field of the UVCB
|
||||
*
|
||||
* Tear down the protected VM that had been previously prepared for teardown
|
||||
* using kvm_s390_pv_set_aside. Ideally this should be called by
|
||||
* userspace asynchronously from a separate thread.
|
||||
*
|
||||
* Context: kvm->lock must not be held.
|
||||
*
|
||||
* Return: 0 in case of success, -EINVAL if no protected VM had been
|
||||
* prepared for asynchronous teardown, -EIO in case of other errors.
|
||||
*/
|
||||
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
{
|
||||
struct pv_vm_to_be_destroyed *p;
|
||||
int ret = 0;
|
||||
|
||||
lockdep_assert_not_held(&kvm->lock);
|
||||
mutex_lock(&kvm->lock);
|
||||
p = kvm->arch.pv.set_aside;
|
||||
kvm->arch.pv.set_aside = NULL;
|
||||
mutex_unlock(&kvm->lock);
|
||||
if (!p)
|
||||
return -EINVAL;
|
||||
|
||||
/* When a fatal signal is received, stop immediately */
|
||||
if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
|
||||
goto done;
|
||||
if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
|
||||
ret = -EIO;
|
||||
kfree(p);
|
||||
p = NULL;
|
||||
done:
|
||||
/*
|
||||
* p is not NULL if we aborted because of a fatal signal, in which
|
||||
* case queue the leftover for later cleanup.
|
||||
*/
|
||||
if (p) {
|
||||
mutex_lock(&kvm->lock);
|
||||
list_add(&p->list, &kvm->arch.pv.need_cleanup);
|
||||
mutex_unlock(&kvm->lock);
|
||||
/* Did not finish, but pretend things went well */
|
||||
*rc = UVC_RC_EXECUTED;
|
||||
*rrc = 42;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
|
||||
u16 dummy;
|
||||
int r;
|
||||
|
||||
/*
|
||||
* No locking is needed since this is the last thread of the last user of this
|
||||
@ -202,7 +522,9 @@ static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
|
||||
* unregistered. This means that if this notifier runs, then the
|
||||
* struct kvm is still valid.
|
||||
*/
|
||||
kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
|
||||
r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
|
||||
if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
|
||||
kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
|
||||
@ -226,8 +548,9 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
|
||||
uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
|
||||
uvcb.guest_stor_len = kvm->arch.pv.guest_len;
|
||||
uvcb.guest_asce = kvm->arch.gmap->asce;
|
||||
uvcb.guest_sca = (unsigned long)kvm->arch.sca;
|
||||
uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
|
||||
uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
|
||||
uvcb.conf_base_stor_origin =
|
||||
virt_to_phys((void *)kvm->arch.pv.stor_base);
|
||||
uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
|
||||
|
||||
cc = uv_call_sched(0, (u64)&uvcb);
|
||||
|
@ -654,7 +654,7 @@ static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
|
||||
page = gfn_to_page(kvm, gpa_to_gfn(gpa));
|
||||
if (is_error_page(page))
|
||||
return -EINVAL;
|
||||
*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
|
||||
*hpa = (hpa_t)page_to_phys(page) + (gpa & ~PAGE_MASK);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -869,7 +869,7 @@ static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
|
||||
WARN_ON_ONCE(rc);
|
||||
return 1;
|
||||
}
|
||||
vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
|
||||
vsie_page->scb_o = phys_to_virt(hpa);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -72,7 +72,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
|
||||
goto out_free;
|
||||
page->index = 0;
|
||||
list_add(&page->lru, &gmap->crst_list);
|
||||
table = (unsigned long *) page_to_phys(page);
|
||||
table = page_to_virt(page);
|
||||
crst_table_init(table, etype);
|
||||
gmap->table = table;
|
||||
gmap->asce = atype | _ASCE_TABLE_LENGTH |
|
||||
@ -311,12 +311,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
|
||||
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
new = (unsigned long *) page_to_phys(page);
|
||||
new = page_to_virt(page);
|
||||
crst_table_init(new, init);
|
||||
spin_lock(&gmap->guest_table_lock);
|
||||
if (*table & _REGION_ENTRY_INVALID) {
|
||||
list_add(&page->lru, &gmap->crst_list);
|
||||
*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
|
||||
*table = __pa(new) | _REGION_ENTRY_LENGTH |
|
||||
(*table & _REGION_ENTRY_TYPE_MASK);
|
||||
page->index = gaddr;
|
||||
page = NULL;
|
||||
@ -557,7 +557,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
|
||||
gaddr & _REGION1_MASK))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
|
||||
table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
|
||||
@ -565,7 +565,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
|
||||
gaddr & _REGION2_MASK))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
|
||||
table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
|
||||
@ -573,7 +573,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
|
||||
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
|
||||
gaddr & _REGION3_MASK))
|
||||
return -ENOMEM;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
}
|
||||
table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
|
||||
/* Walk the parent mm page table */
|
||||
@ -813,7 +813,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
|
||||
break;
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
return NULL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
fallthrough;
|
||||
case _ASCE_TYPE_REGION2:
|
||||
table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
|
||||
@ -821,7 +821,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
|
||||
break;
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
return NULL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
fallthrough;
|
||||
case _ASCE_TYPE_REGION3:
|
||||
table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
|
||||
@ -829,7 +829,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
|
||||
break;
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
return NULL;
|
||||
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
|
||||
table = __va(*table & _REGION_ENTRY_ORIGIN);
|
||||
fallthrough;
|
||||
case _ASCE_TYPE_SEGMENT:
|
||||
table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
|
||||
@ -837,7 +837,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
|
||||
break;
|
||||
if (*table & _REGION_ENTRY_INVALID)
|
||||
return NULL;
|
||||
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
|
||||
table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
|
||||
}
|
||||
return table;
|
||||
@ -1150,7 +1150,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
|
||||
if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
|
||||
address = pte_val(pte) & PAGE_MASK;
|
||||
address += gaddr & ~PAGE_MASK;
|
||||
*val = *(unsigned long *) address;
|
||||
*val = *(unsigned long *)__va(address);
|
||||
set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_YOUNG)));
|
||||
/* Do *NOT* clear the _PAGE_INVALID bit! */
|
||||
rc = 0;
|
||||
@ -1335,7 +1335,8 @@ static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
|
||||
*/
|
||||
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
|
||||
{
|
||||
unsigned long sto, *ste, *pgt;
|
||||
unsigned long *ste;
|
||||
phys_addr_t sto, pgt;
|
||||
struct page *page;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
@ -1343,13 +1344,13 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
|
||||
if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
|
||||
return;
|
||||
gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
|
||||
sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
|
||||
sto = __pa(ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
|
||||
gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
|
||||
pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
|
||||
pgt = *ste & _SEGMENT_ENTRY_ORIGIN;
|
||||
*ste = _SEGMENT_ENTRY_EMPTY;
|
||||
__gmap_unshadow_pgt(sg, raddr, pgt);
|
||||
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
|
||||
/* Free page table */
|
||||
page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
|
||||
page = phys_to_page(pgt);
|
||||
list_del(&page->lru);
|
||||
page_table_free_pgste(page);
|
||||
}
|
||||
@ -1365,19 +1366,19 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
|
||||
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
|
||||
unsigned long *sgt)
|
||||
{
|
||||
unsigned long *pgt;
|
||||
struct page *page;
|
||||
phys_addr_t pgt;
|
||||
int i;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
|
||||
if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
|
||||
continue;
|
||||
pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
|
||||
pgt = sgt[i] & _REGION_ENTRY_ORIGIN;
|
||||
sgt[i] = _SEGMENT_ENTRY_EMPTY;
|
||||
__gmap_unshadow_pgt(sg, raddr, pgt);
|
||||
__gmap_unshadow_pgt(sg, raddr, __va(pgt));
|
||||
/* Free page table */
|
||||
page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
|
||||
page = phys_to_page(pgt);
|
||||
list_del(&page->lru);
|
||||
page_table_free_pgste(page);
|
||||
}
|
||||
@ -1392,7 +1393,8 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
|
||||
*/
|
||||
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
|
||||
{
|
||||
unsigned long r3o, *r3e, *sgt;
|
||||
unsigned long r3o, *r3e;
|
||||
phys_addr_t sgt;
|
||||
struct page *page;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
@ -1401,12 +1403,12 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
|
||||
return;
|
||||
gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
|
||||
r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
|
||||
gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
|
||||
sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
|
||||
gmap_idte_one(__pa(r3o) | _ASCE_TYPE_REGION3, raddr);
|
||||
sgt = *r3e & _REGION_ENTRY_ORIGIN;
|
||||
*r3e = _REGION3_ENTRY_EMPTY;
|
||||
__gmap_unshadow_sgt(sg, raddr, sgt);
|
||||
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
|
||||
/* Free segment table */
|
||||
page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
|
||||
page = phys_to_page(sgt);
|
||||
list_del(&page->lru);
|
||||
__free_pages(page, CRST_ALLOC_ORDER);
|
||||
}
|
||||
@ -1422,19 +1424,19 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
|
||||
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
|
||||
unsigned long *r3t)
|
||||
{
|
||||
unsigned long *sgt;
|
||||
struct page *page;
|
||||
phys_addr_t sgt;
|
||||
int i;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
|
||||
if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
|
||||
continue;
|
||||
sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
|
||||
sgt = r3t[i] & _REGION_ENTRY_ORIGIN;
|
||||
r3t[i] = _REGION3_ENTRY_EMPTY;
|
||||
__gmap_unshadow_sgt(sg, raddr, sgt);
|
||||
__gmap_unshadow_sgt(sg, raddr, __va(sgt));
|
||||
/* Free segment table */
|
||||
page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
|
||||
page = phys_to_page(sgt);
|
||||
list_del(&page->lru);
|
||||
__free_pages(page, CRST_ALLOC_ORDER);
|
||||
}
|
||||
@ -1449,7 +1451,8 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
|
||||
*/
|
||||
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
|
||||
{
|
||||
unsigned long r2o, *r2e, *r3t;
|
||||
unsigned long r2o, *r2e;
|
||||
phys_addr_t r3t;
|
||||
struct page *page;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
@ -1458,12 +1461,12 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
|
||||
return;
|
||||
gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
|
||||
r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
|
||||
gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
|
||||
r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
|
||||
gmap_idte_one(__pa(r2o) | _ASCE_TYPE_REGION2, raddr);
|
||||
r3t = *r2e & _REGION_ENTRY_ORIGIN;
|
||||
*r2e = _REGION2_ENTRY_EMPTY;
|
||||
__gmap_unshadow_r3t(sg, raddr, r3t);
|
||||
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
|
||||
/* Free region 3 table */
|
||||
page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
|
||||
page = phys_to_page(r3t);
|
||||
list_del(&page->lru);
|
||||
__free_pages(page, CRST_ALLOC_ORDER);
|
||||
}
|
||||
@ -1479,7 +1482,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
|
||||
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
|
||||
unsigned long *r2t)
|
||||
{
|
||||
unsigned long *r3t;
|
||||
phys_addr_t r3t;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
@ -1487,11 +1490,11 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
|
||||
for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
|
||||
if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
|
||||
continue;
|
||||
r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
|
||||
r3t = r2t[i] & _REGION_ENTRY_ORIGIN;
|
||||
r2t[i] = _REGION2_ENTRY_EMPTY;
|
||||
__gmap_unshadow_r3t(sg, raddr, r3t);
|
||||
__gmap_unshadow_r3t(sg, raddr, __va(r3t));
|
||||
/* Free region 3 table */
|
||||
page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
|
||||
page = phys_to_page(r3t);
|
||||
list_del(&page->lru);
|
||||
__free_pages(page, CRST_ALLOC_ORDER);
|
||||
}
|
||||
@ -1506,8 +1509,9 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
|
||||
*/
|
||||
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
|
||||
{
|
||||
unsigned long r1o, *r1e, *r2t;
|
||||
unsigned long r1o, *r1e;
|
||||
struct page *page;
|
||||
phys_addr_t r2t;
|
||||
|
||||
BUG_ON(!gmap_is_shadow(sg));
|
||||
r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
|
||||
@ -1515,12 +1519,12 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
|
||||
return;
|
||||
gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
|
||||
r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
|
||||
gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
|
||||
r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
|
||||
gmap_idte_one(__pa(r1o) | _ASCE_TYPE_REGION1, raddr);
|
||||
r2t = *r1e & _REGION_ENTRY_ORIGIN;
|
||||
*r1e = _REGION1_ENTRY_EMPTY;
|
||||
__gmap_unshadow_r2t(sg, raddr, r2t);
|
||||
__gmap_unshadow_r2t(sg, raddr, __va(r2t));
|
||||
/* Free region 2 table */
|
||||
page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
|
||||
page = phys_to_page(r2t);
|
||||
list_del(&page->lru);
|
||||
__free_pages(page, CRST_ALLOC_ORDER);
|
||||
}
|
||||
@@ -1536,22 +1540,23 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
 static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
 {
-	unsigned long asce, *r2t;
+	unsigned long asce;
 	struct page *page;
+	phys_addr_t r2t;
 	int i;
 
 	BUG_ON(!gmap_is_shadow(sg));
-	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
+	asce = __pa(r1t) | _ASCE_TYPE_REGION1;
 	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
 		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
 			continue;
-		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
-		__gmap_unshadow_r2t(sg, raddr, r2t);
+		r2t = r1t[i] & _REGION_ENTRY_ORIGIN;
+		__gmap_unshadow_r2t(sg, raddr, __va(r2t));
 		/* Clear entry and flush translation r1t -> r2t */
 		gmap_idte_one(asce, raddr);
 		r1t[i] = _REGION1_ENTRY_EMPTY;
 		/* Free region 2 table */
-		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
+		page = phys_to_page(r2t);
 		list_del(&page->lru);
 		__free_pages(page, CRST_ALLOC_ORDER);
 	}
@@ -1573,7 +1578,7 @@ static void gmap_unshadow(struct gmap *sg)
 	sg->removed = 1;
 	gmap_call_notifier(sg, 0, -1UL);
 	gmap_flush_tlb(sg);
-	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
+	table = __va(sg->asce & _ASCE_ORIGIN);
 	switch (sg->asce & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		__gmap_unshadow_r1t(sg, 0, table);
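The unshadow hunks above all follow one pattern: region-table origins are now carried as phys_addr_t, mapped with __va() only where the CPU has to dereference them, and used directly (or via phys_to_page()) where a physical address is needed for the flush or for freeing the backing pages. A minimal illustrative sketch of that round trip (not part of the diff; the helper name is hypothetical, the macros are the ones used above):

#include <linux/mm.h>		/* __free_pages(), phys_to_page() */
#include <linux/string.h>	/* memset() */
#include <asm/pgalloc.h>	/* CRST_ALLOC_ORDER */

/* Hypothetical helper: given only the physical origin of a region table,
 * obtain its kernel mapping for CPU access and its struct page for freeing. */
static void put_crst_table(phys_addr_t origin)
{
	unsigned long *table = __va(origin);		/* dereference entries here */
	struct page *page = phys_to_page(origin);	/* hand back to the allocator */

	memset(table, 0, PAGE_SIZE << CRST_ALLOC_ORDER);
	__free_pages(page, CRST_ALLOC_ORDER);
}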
@@ -1748,7 +1753,8 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
 {
 	unsigned long raddr, origin, offset, len;
-	unsigned long *s_r2t, *table;
+	unsigned long *table;
+	phys_addr_t s_r2t;
 	struct page *page;
 	int rc;
 
@@ -1760,7 +1766,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	page->index = r2t & _REGION_ENTRY_ORIGIN;
 	if (fake)
 		page->index |= GMAP_SHADOW_FAKE_TABLE;
-	s_r2t = (unsigned long *) page_to_phys(page);
+	s_r2t = page_to_phys(page);
 	/* Install shadow region second table */
 	spin_lock(&sg->guest_table_lock);
 	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
@@ -1775,9 +1781,9 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 		rc = -EAGAIN; /* Race with shadow */
 		goto out_free;
 	}
-	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
+	crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY);
 	/* mark as invalid as long as the parent table is not protected */
-	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
+	*table = s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
 	if (sg->edat_level >= 1)
 		*table |= (r2t & _REGION_ENTRY_PROTECT);
@@ -1798,8 +1804,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 4);
-		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
-		    (unsigned long) s_r2t)
+		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r2t)
 			rc = -EAGAIN; /* Race with unshadow */
 		else
 			*table &= ~_REGION_ENTRY_INVALID;
@@ -1832,7 +1837,8 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
 {
 	unsigned long raddr, origin, offset, len;
-	unsigned long *s_r3t, *table;
+	unsigned long *table;
+	phys_addr_t s_r3t;
 	struct page *page;
 	int rc;
 
@@ -1844,7 +1850,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	page->index = r3t & _REGION_ENTRY_ORIGIN;
 	if (fake)
 		page->index |= GMAP_SHADOW_FAKE_TABLE;
-	s_r3t = (unsigned long *) page_to_phys(page);
+	s_r3t = page_to_phys(page);
 	/* Install shadow region second table */
 	spin_lock(&sg->guest_table_lock);
 	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
@@ -1859,9 +1865,9 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 		rc = -EAGAIN; /* Race with shadow */
 		goto out_free;
 	}
-	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
+	crst_table_init(__va(s_r3t), _REGION3_ENTRY_EMPTY);
 	/* mark as invalid as long as the parent table is not protected */
-	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
+	*table = s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
 	if (sg->edat_level >= 1)
 		*table |= (r3t & _REGION_ENTRY_PROTECT);
@@ -1882,8 +1888,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 3);
-		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
-		    (unsigned long) s_r3t)
+		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_r3t)
 			rc = -EAGAIN; /* Race with unshadow */
 		else
 			*table &= ~_REGION_ENTRY_INVALID;
@@ -1916,7 +1921,8 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
 {
 	unsigned long raddr, origin, offset, len;
-	unsigned long *s_sgt, *table;
+	unsigned long *table;
+	phys_addr_t s_sgt;
 	struct page *page;
 	int rc;
 
@@ -1928,7 +1934,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	page->index = sgt & _REGION_ENTRY_ORIGIN;
 	if (fake)
 		page->index |= GMAP_SHADOW_FAKE_TABLE;
-	s_sgt = (unsigned long *) page_to_phys(page);
+	s_sgt = page_to_phys(page);
 	/* Install shadow region second table */
 	spin_lock(&sg->guest_table_lock);
 	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
@@ -1943,9 +1949,9 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 		rc = -EAGAIN; /* Race with shadow */
 		goto out_free;
 	}
-	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
+	crst_table_init(__va(s_sgt), _SEGMENT_ENTRY_EMPTY);
 	/* mark as invalid as long as the parent table is not protected */
-	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
+	*table = s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
 	if (sg->edat_level >= 1)
 		*table |= sgt & _REGION_ENTRY_PROTECT;
@@ -1966,8 +1972,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 2);
-		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
-		    (unsigned long) s_sgt)
+		if (!table || (*table & _REGION_ENTRY_ORIGIN) != s_sgt)
 			rc = -EAGAIN; /* Race with unshadow */
 		else
 			*table &= ~_REGION_ENTRY_INVALID;
@@ -2040,8 +2045,9 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
 {
 	unsigned long raddr, origin;
-	unsigned long *s_pgt, *table;
+	unsigned long *table;
 	struct page *page;
+	phys_addr_t s_pgt;
 	int rc;
 
 	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
@@ -2052,7 +2058,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
 	if (fake)
 		page->index |= GMAP_SHADOW_FAKE_TABLE;
-	s_pgt = (unsigned long *) page_to_phys(page);
+	s_pgt = page_to_phys(page);
 	/* Install shadow page table */
 	spin_lock(&sg->guest_table_lock);
 	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
@@ -2085,8 +2091,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
 		table = gmap_table_walk(sg, saddr, 1);
-		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
-		    (unsigned long) s_pgt)
+		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) != s_pgt)
 			rc = -EAGAIN; /* Race with unshadow */
 		else
 			*table &= ~_SEGMENT_ENTRY_INVALID;
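The gmap_shadow_{r2t,r3t,sgt,pgt}() hunks apply the same convention to the shadow-table install path: the freshly allocated table's origin is taken with page_to_phys(), the table is initialized through its __va() mapping, and it is the physical origin (not a casted kernel pointer) that is stored in, and later compared against, the parent table entry. A minimal sketch of that install step (illustrative only; the helper and its arguments are hypothetical, the macros are the ones used above):

/* Hypothetical sketch: install a newly allocated shadow region-2 table. */
static void install_shadow_r2t(unsigned long *parent_entry, struct page *page)
{
	phys_addr_t s_r2t = page_to_phys(page);

	/* initialize the new table through its kernel mapping */
	crst_table_init(__va(s_r2t), _REGION2_ENTRY_EMPTY);
	/* the parent entry carries the physical origin, initially invalid;
	 * a later race check compares (*parent_entry & _REGION_ENTRY_ORIGIN)
	 * against s_r2t directly, no cast needed. */
	*parent_entry = s_r2t | _REGION_ENTRY_LENGTH |
			_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
}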
@@ -140,25 +140,25 @@ void mark_rodata_ro(void)
 	debug_checkwx();
 }
 
-int set_memory_encrypted(unsigned long addr, int numpages)
+int set_memory_encrypted(unsigned long vaddr, int numpages)
 {
 	int i;
 
 	/* make specified pages unshared, (swiotlb, dma_free) */
 	for (i = 0; i < numpages; ++i) {
-		uv_remove_shared(addr);
-		addr += PAGE_SIZE;
+		uv_remove_shared(virt_to_phys((void *)vaddr));
+		vaddr += PAGE_SIZE;
 	}
 	return 0;
 }
 
-int set_memory_decrypted(unsigned long addr, int numpages)
+int set_memory_decrypted(unsigned long vaddr, int numpages)
 {
 	int i;
 	/* make specified pages shared (swiotlb, dma_alloca) */
 	for (i = 0; i < numpages; ++i) {
-		uv_set_shared(addr);
-		addr += PAGE_SIZE;
+		uv_set_shared(virt_to_phys((void *)vaddr));
+		vaddr += PAGE_SIZE;
 	}
 	return 0;
 }
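Here the interface keeps taking a kernel virtual address, but the Ultravisor share/unshare calls operate on absolute (physical) addresses, so each page is converted with virt_to_phys() before the UV call now that kernel virtual and physical addresses may differ. A hedged usage sketch from the caller's side (the caller, buffer and error handling are hypothetical; set_memory_{en,de}crypted() is the generic kernel API shown in the hunk above):

#include <linux/set_memory.h>

/* Hypothetical caller: share a buffer with the hypervisor for I/O,
 * then make it protected (unshared) again afterwards. */
static int demo_share_for_io(void *buf, int numpages)
{
	int rc;

	rc = set_memory_decrypted((unsigned long)buf, numpages);
	if (rc)
		return rc;
	/* ... perform I/O through the now-shared pages ... */
	return set_memory_encrypted((unsigned long)buf, numpages);
}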
@@ -429,7 +429,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
 
 	aqic_gisa.isc = nisc;
 	aqic_gisa.ir = 1;
-	aqic_gisa.gisa = (uint64_t)gisa >> 4;
+	aqic_gisa.gisa = virt_to_phys(gisa) >> 4;
 
 	status = ap_aqic(q->apqn, aqic_gisa, h_nib);
 	switch (status.response_code) {
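The GISA designation programmed via ap_aqic() must be the physical origin of the GISA; with kernel virtual != physical the old pointer cast is no longer equivalent, hence virt_to_phys(). A tiny sketch mirroring the changed line (the helper name is hypothetical):

#include <linux/types.h>
#include <linux/io.h>	/* virt_to_phys() */

/* Hypothetical helper: pack the GISA origin as programmed above. */
static inline u64 gisa_designation(void *gisa)
{
	return virt_to_phys(gisa) >> 4;
}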
@@ -1181,6 +1181,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_ZPCI_OP 221
 #define KVM_CAP_S390_CPU_TOPOLOGY 222
 #define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
+#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
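Userspace can probe for the new capability before relying on asynchronous protected-VM teardown; a minimal sketch (the fd handling is assumed to exist elsewhere):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns non-zero if asynchronous protected-VM teardown is available. */
static int have_pv_async_disable(int kvm_fd)
{
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		     KVM_CAP_S390_PROTECTED_ASYNC_DISABLE) > 0;
}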
@@ -1743,6 +1744,8 @@ enum pv_cmd_id {
 	KVM_PV_UNSHARE_ALL,
 	KVM_PV_INFO,
 	KVM_PV_DUMP,
+	KVM_PV_ASYNC_CLEANUP_PREPARE,
+	KVM_PV_ASYNC_CLEANUP_PERFORM,
 };
 
 struct kvm_pv_cmd {
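The two new pv_cmd_id values are driven through the existing KVM_S390_PV_COMMAND vm ioctl with struct kvm_pv_cmd. A hedged userspace sketch of the intended sequence (error handling and threading omitted; PERFORM would normally be issued from a separate thread so the teardown does not block the newly started guest):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical sketch: set the old protected VM aside, then clean it up. */
static int pv_async_teardown(int vm_fd)
{
	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ASYNC_CLEANUP_PREPARE };

	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
		return -1;	/* e.g. a previous teardown is still pending */

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = KVM_PV_ASYNC_CLEANUP_PERFORM;	/* the actual cleanup */
	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
}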