Merge tag 'kvmarm-fixes-6.3-4' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.3, part #4

 - Plug a buffer overflow due to the use of the user-provided register
   width for firmware regs. Outright reject accesses where the
   user register width does not match the kernel representation.

 - Protect non-atomic RMW operations on vCPU flags against preemption,
   as an update to the flags by an intervening preemption could be lost.
Paolo Bonzini 2023-04-21 19:19:02 -04:00
commit 265b97cbc2
2 changed files with 20 additions and 1 deletion

arch/arm64/include/asm/kvm_host.h

@@ -576,9 +576,22 @@ struct kvm_vcpu_arch {
 	({							\
 		__build_check_flag(v, flagset, f, m);		\
 								\
-		v->arch.flagset & (m);				\
+		READ_ONCE(v->arch.flagset) & (m);		\
 	})
 
+/*
+ * Note that the set/clear accessors must be preempt-safe in order to
+ * avoid nesting them with load/put which also manipulate flags...
+ */
+#ifdef __KVM_NVHE_HYPERVISOR__
+/* the nVHE hypervisor is always non-preemptible */
+#define __vcpu_flags_preempt_disable()
+#define __vcpu_flags_preempt_enable()
+#else
+#define __vcpu_flags_preempt_disable()	preempt_disable()
+#define __vcpu_flags_preempt_enable()	preempt_enable()
+#endif
+
 #define __vcpu_set_flag(v, flagset, f, m)			\
 	do {							\
 		typeof(v->arch.flagset) *fset;			\
@@ -586,9 +599,11 @@ struct kvm_vcpu_arch {
 		__build_check_flag(v, flagset, f, m);		\
 								\
 		fset = &v->arch.flagset;			\
+		__vcpu_flags_preempt_disable();			\
 		if (HWEIGHT(m) > 1)				\
 			*fset &= ~(m);				\
 		*fset |= (f);					\
+		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
 #define __vcpu_clear_flag(v, flagset, f, m)			\
@@ -598,7 +613,9 @@ struct kvm_vcpu_arch {
 		__build_check_flag(v, flagset, f, m);		\
 								\
 		fset = &v->arch.flagset;			\
+		__vcpu_flags_preempt_disable();			\
 		*fset &= ~(m);					\
+		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
 #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
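
The hunks above make the flag read use READ_ONCE() and wrap the set/clear
read-modify-write in the new preemption guards. To make the lost-update hazard
concrete, here is a minimal sketch, not the kernel's code: the helper name
vcpu_set_flag_example() is made up, the direct use of the iflags byte is
illustrative only, and FLAG_A/FLAG_B stand in for real vCPU flags.

#include <linux/kvm_host.h>
#include <linux/preempt.h>

/*
 * Without the guard, the update is a plain load/modify/store:
 *
 *   vCPU ioctl thread                 preemption (vcpu_put/vcpu_load)
 *   -----------------                 -------------------------------
 *   tmp = vcpu->arch.iflags;
 *   tmp |= FLAG_A;
 *                                     vcpu->arch.iflags |= FLAG_B;
 *   vcpu->arch.iflags = tmp;          <- FLAG_B is silently lost
 *
 * Disabling preemption around the read-modify-write closes the window.
 */
static inline void vcpu_set_flag_example(struct kvm_vcpu *vcpu, u8 flag)
{
	preempt_disable();              /* load/put cannot interleave here */
	vcpu->arch.iflags |= flag;      /* non-atomic read-modify-write */
	preempt_enable();
}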

arch/arm64/kvm/hypercalls.c

@@ -397,6 +397,8 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	u64 val;
 	int wa_level;
 
+	if (KVM_REG_SIZE(reg->id) != sizeof(val))
+		return -ENOENT;
 	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
 		return -EFAULT;
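
KVM_REG_SIZE() is decoded from the user-supplied reg->id, so before the check
above a caller could claim a register wider than the u64 'val' on the stack and
copy_from_user() would write past it; the mismatched access is now refused up
front with -ENOENT. Below is a hedged userspace sketch of such an access,
building against the arm64 UAPI headers: the helper name is made up and the
vCPU file descriptor is assumed to have been created elsewhere.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_SET_ONE_REG, KVM_REG_* */

/* Sketch only: firmware register access with a forged 128-bit size field. */
static int set_fw_reg_bogus_width(int vcpu_fd)
{
	uint8_t payload[16] = { 0 };	/* pretends to be a 128-bit value */
	struct kvm_one_reg reg = {
		/* PSCI version firmware reg, size field rewritten U64 -> U128 */
		.id   = (KVM_REG_ARM_FW_REG(0) & ~KVM_REG_SIZE_MASK) |
			KVM_REG_SIZE_U128,
		.addr = (uintptr_t)payload,
	};

	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0) {
		/* with the fix, this fails with ENOENT before any copy */
		fprintf(stderr, "KVM_SET_ONE_REG: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}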