Merge tag 'x86_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - ioremap: mask out the bits which are not part of the physical address
   *after* the size computation is done to prevent any hypothetical
   ioremap failures

 - Change the MSR save/restore functionality during suspend to rely on
   flags denoting that the related MSRs are actually supported vs reading
   them and assuming they are (an Atom one allows reading but not
   writing, thus breaking this scheme at resume time)

 - Prevent IV reuse in the AES-GCM communication scheme between SNP
   guests and the AMD secure processor

* tag 'x86_urgent_for_v6.1_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ioremap: Fix page aligned size calculation in __ioremap_caller()
  x86/pm: Add enumeration check before spec MSRs save/restore setup
  x86/tsx: Add a feature bit for TSX control MSR support
  virt/sev-guest: Prevent IV reuse in the SNP guest driver
commit 08b0644126
arch/x86/include/asm/cpufeatures.h

@@ -305,6 +305,9 @@
 #define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
 #define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
 
+
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
arch/x86/kernel/cpu/tsx.c

@@ -58,24 +58,6 @@ static void tsx_enable(void)
 	wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool tsx_ctrl_is_supported(void)
-{
-	u64 ia32_cap = x86_read_arch_cap_msr();
-
-	/*
-	 * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
-	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
-	 *
-	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
-	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
-	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
-	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
-	 * tsx= cmdline requests will do nothing on CPUs without
-	 * MSR_IA32_TSX_CTRL support.
-	 */
-	return !!(ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
-}
-
 static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
 {
 	if (boot_cpu_has_bug(X86_BUG_TAA))
@@ -135,7 +117,7 @@ static void tsx_clear_cpuid(void)
 		rdmsrl(MSR_TSX_FORCE_ABORT, msr);
 		msr |= MSR_TFA_TSX_CPUID_CLEAR;
 		wrmsrl(MSR_TSX_FORCE_ABORT, msr);
-	} else if (tsx_ctrl_is_supported()) {
+	} else if (cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL)) {
 		rdmsrl(MSR_IA32_TSX_CTRL, msr);
 		msr |= TSX_CTRL_CPUID_CLEAR;
 		wrmsrl(MSR_IA32_TSX_CTRL, msr);
@@ -158,7 +140,8 @@ static void tsx_dev_mode_disable(void)
 	u64 mcu_opt_ctrl;
 
 	/* Check if RTM_ALLOW exists */
-	if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+	if (!boot_cpu_has_bug(X86_BUG_TAA) ||
+	    !cpu_feature_enabled(X86_FEATURE_MSR_TSX_CTRL) ||
 	    !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
 		return;
 
@@ -191,7 +174,20 @@ void __init tsx_init(void)
 		return;
 	}
 
-	if (!tsx_ctrl_is_supported()) {
+	/*
+	 * TSX is controlled via MSR_IA32_TSX_CTRL. However, support for this
+	 * MSR is enumerated by ARCH_CAP_TSX_MSR bit in MSR_IA32_ARCH_CAPABILITIES.
+	 *
+	 * TSX control (aka MSR_IA32_TSX_CTRL) is only available after a
+	 * microcode update on CPUs that have their MSR_IA32_ARCH_CAPABILITIES
+	 * bit MDS_NO=1. CPUs with MDS_NO=0 are not planned to get
+	 * MSR_IA32_TSX_CTRL support even after a microcode update. Thus,
+	 * tsx= cmdline requests will do nothing on CPUs without
+	 * MSR_IA32_TSX_CTRL support.
+	 */
+	if (x86_read_arch_cap_msr() & ARCH_CAP_TSX_CTRL_MSR) {
+		setup_force_cpu_cap(X86_FEATURE_MSR_TSX_CTRL);
+	} else {
 		tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
 		return;
 	}
arch/x86/mm/ioremap.c

@@ -217,9 +217,15 @@ __ioremap_caller(resource_size_t phys_addr, unsigned long size,
 	 * Mappings have to be page-aligned
 	 */
 	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PHYSICAL_PAGE_MASK;
+	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
+	/*
+	 * Mask out any bits not part of the actual physical
+	 * address, like memory encryption bits.
+	 */
+	phys_addr &= PHYSICAL_PAGE_MASK;
+
 	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
 						pcm, &new_pcm);
 	if (retval) {
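For illustration, here is a minimal standalone userspace sketch of the arithmetic this hunk fixes. It is not kernel code: the physical-address width, the PAGE_ALIGN() helper and the encryption-bit position are made-up stand-ins for the kernel's PHYSICAL_PAGE_MASK and the SME C-bit. Because last_addr is computed from the caller's original address, stripping the encryption bit from phys_addr before the subtraction means the bit no longer cancels and the size is wildly inflated; stripping it after the size computation keeps the size correct.

#include <stdint.h>
#include <stdio.h>

/* Stand-in constants; the real kernel masks are wider and set up at boot. */
#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))		/* page-aligns, keeps high bits */
#define PHYS_BITS	47				/* hypothetical physical address width */
#define PHYSICAL_PAGE_MASK	(((1ULL << PHYS_BITS) - 1) & PAGE_MASK)	/* also clears bits >= PHYS_BITS */
#define ENC_BIT		(1ULL << 47)			/* hypothetical memory-encryption bit */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	uint64_t size = 0x2000, phys_addr = 0x12345678ULL | ENC_BIT;
	uint64_t last_addr = phys_addr + size - 1;	/* still carries the encryption bit */

	/* Old order: strip non-address bits first, then compute the size. */
	uint64_t pa_old = phys_addr & PHYSICAL_PAGE_MASK;
	uint64_t size_old = PAGE_ALIGN(last_addr + 1) - pa_old;

	/* New order: page-align only, compute the size, strip the extra bits afterwards. */
	uint64_t pa_new = phys_addr & PAGE_MASK;
	uint64_t size_new = PAGE_ALIGN(last_addr + 1) - pa_new;
	pa_new &= PHYSICAL_PAGE_MASK;

	printf("mask before size calc: size = %#llx (encryption bit leaked into the size)\n",
	       (unsigned long long)size_old);
	printf("mask after size calc:  size = %#llx, phys_addr = %#llx\n",
	       (unsigned long long)size_new, (unsigned long long)pa_new);
	return 0;
}

Compiled and run, the first line prints a size with the encryption bit folded into it, while the second prints the expected 0x3000 covering the three pages the request actually touches.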
arch/x86/power/cpu.c

@@ -513,16 +513,23 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
 
 static void pm_save_spec_msr(void)
 {
-	u32 spec_msr_id[] = {
-		MSR_IA32_SPEC_CTRL,
-		MSR_IA32_TSX_CTRL,
-		MSR_TSX_FORCE_ABORT,
-		MSR_IA32_MCU_OPT_CTRL,
-		MSR_AMD64_LS_CFG,
-		MSR_AMD64_DE_CFG,
+	struct msr_enumeration {
+		u32 msr_no;
+		u32 feature;
+	} msr_enum[] = {
+		{ MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL },
+		{ MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL },
+		{ MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT },
+		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
+		{ MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD },
+		{ MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC },
 	};
+	int i;
 
-	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
+		if (boot_cpu_has(msr_enum[i].feature))
+			msr_build_context(&msr_enum[i].msr_no, 1);
+	}
 }
 
 static int pm_check_save_msr(void)
drivers/virt/coco/sev-guest/sev-guest.c

@@ -67,8 +67,27 @@ static bool is_vmpck_empty(struct snp_guest_dev *snp_dev)
 	return true;
 }
 
+/*
+ * If an error is received from the host or AMD Secure Processor (ASP) there
+ * are two options. Either retry the exact same encrypted request or discontinue
+ * using the VMPCK.
+ *
+ * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
+ * encrypt the requests. The IV for this scheme is the sequence number. GCM
+ * cannot tolerate IV reuse.
+ *
+ * The ASP FW v1.51 only increments the sequence numbers on a successful
+ * guest<->ASP back and forth and only accepts messages at its exact sequence
+ * number.
+ *
+ * So if the sequence number were to be reused the encryption scheme is
+ * vulnerable. If the sequence number were incremented for a fresh IV the ASP
+ * will reject the request.
+ */
 static void snp_disable_vmpck(struct snp_guest_dev *snp_dev)
 {
+	dev_alert(snp_dev->dev, "Disabling vmpck_id %d to prevent IV reuse.\n",
+		  vmpck_id);
 	memzero_explicit(snp_dev->vmpck, VMPCK_KEY_LEN);
 	snp_dev->vmpck = NULL;
 }
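To make the comment's "GCM cannot tolerate IV reuse" concrete: GCM encrypts with an AES counter-mode keystream derived from the key and the IV, so reusing an IV reuses the keystream. The toy sketch below substitutes a fixed stand-in byte pattern for real AES output (all names and values are illustrative only) and shows that XORing two ciphertexts produced under the same IV cancels the keystream and leaks the XOR of the plaintexts.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model only: a fixed byte pattern stands in for the AES-CTR keystream
 * that a real (key, IV) pair would produce. The point is what an observer
 * learns when the same IV, and therefore the same keystream, is used twice.
 */
static void xor_stream(uint8_t *dst, const uint8_t *src, size_t len)
{
	static const uint8_t keystream[16] = {
		0x3a, 0x91, 0x5c, 0x07, 0xe2, 0x48, 0xb3, 0x6d,
		0x10, 0xff, 0x24, 0x89, 0x77, 0xc1, 0x0b, 0x5e,
	};

	for (size_t i = 0; i < len; i++)
		dst[i] = src[i] ^ keystream[i];
}

int main(void)
{
	const uint8_t msg1[16] = "attest report A";
	const uint8_t msg2[16] = "derive key  req";
	uint8_t ct1[16], ct2[16], leak[16];
	bool ok = true;

	xor_stream(ct1, msg1, 16);	/* first request, IV = sequence number N */
	xor_stream(ct2, msg2, 16);	/* IV reused: same sequence number, same keystream */

	/* An observer XORs the two ciphertexts: the keystream cancels out. */
	for (size_t i = 0; i < 16; i++) {
		leak[i] = ct1[i] ^ ct2[i];
		ok &= (leak[i] == (uint8_t)(msg1[i] ^ msg2[i]));
	}

	printf("ct1 ^ ct2 equals msg1 ^ msg2: %s\n", ok ? "yes" : "no");
	return 0;
}

Nonce reuse in GCM also allows recovery of the authentication subkey, which is why the driver prefers retiring the VMPCK outright over ever sending two messages under the same sequence number.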
@@ -321,34 +340,71 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	if (rc)
 		return rc;
 
-	/* Call firmware to process the request */
+	/*
+	 * Call firmware to process the request. In this function the encrypted
+	 * message enters shared memory with the host. So after this call the
+	 * sequence number must be incremented or the VMPCK must be deleted to
+	 * prevent reuse of the IV.
+	 */
 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+	/*
+	 * If the extended guest request fails due to having too small of a
+	 * certificate data buffer, retry the same guest request without the
+	 * extended data request in order to increment the sequence number
+	 * and thus avoid IV reuse.
+	 */
+	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
+	    err == SNP_GUEST_REQ_INVALID_LEN) {
+		const unsigned int certs_npages = snp_dev->input.data_npages;
+
+		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+
+		/*
+		 * If this call to the firmware succeeds, the sequence number can
+		 * be incremented allowing for continued use of the VMPCK. If
+		 * there is an error reflected in the return value, this value
+		 * is checked further down and the result will be the deletion
+		 * of the VMPCK and the error code being propagated back to the
+		 * user as an ioctl() return code.
+		 */
+		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+
+		/*
+		 * Override the error to inform callers the given extended
+		 * request buffer size was too small and give the caller the
+		 * required buffer size.
+		 */
+		err = SNP_GUEST_REQ_INVALID_LEN;
+		snp_dev->input.data_npages = certs_npages;
+	}
+
 	if (fw_err)
 		*fw_err = err;
 
-	if (rc)
-		return rc;
+	if (rc) {
+		dev_alert(snp_dev->dev,
+			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
+			  rc, *fw_err);
+		goto disable_vmpck;
+	}
 
-	/*
-	 * The verify_and_dec_payload() will fail only if the hypervisor is
-	 * actively modifying the message header or corrupting the encrypted payload.
-	 * This hints that hypervisor is acting in a bad faith. Disable the VMPCK so that
-	 * the key cannot be used for any communication. The key is disabled to ensure
-	 * that AES-GCM does not use the same IV while encrypting the request payload.
-	 */
 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
 	if (rc) {
 		dev_alert(snp_dev->dev,
-			  "Detected unexpected decode failure, disabling the vmpck_id %d\n",
-			  vmpck_id);
-		snp_disable_vmpck(snp_dev);
-		return rc;
+			  "Detected unexpected decode failure from ASP. rc: %d\n",
+			  rc);
+		goto disable_vmpck;
 	}
 
 	/* Increment to new message sequence after payload decryption was successful. */
 	snp_inc_msg_seqno(snp_dev);
 
 	return 0;
+
+disable_vmpck:
+	snp_disable_vmpck(snp_dev);
+	return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
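The comments above boil down to one invariant: once a request has been exposed to the host, either the sequence number advances or the VMPCK is never used again. A small standalone model of that rule follows (hypothetical helper and variable names, not the driver's API; the counter is bumped by two because request and response each consume a sequence number).

#include <stdbool.h>
#include <stdio.h>

/* Toy state standing in for the per-VMPCK sequence counter and key validity. */
static unsigned int seqno = 1;
static bool vmpck_valid = true;

static void settle_request(bool fw_ok, bool payload_ok)
{
	if (fw_ok && payload_ok) {
		seqno += 2;		/* request and response both consumed a number */
		return;
	}
	vmpck_valid = false;		/* cannot prove the ASP consumed the seqno: retire the key */
}

int main(void)
{
	settle_request(true, true);
	printf("after success: seqno=%u vmpck_valid=%d\n", seqno, vmpck_valid);

	settle_request(true, false);	/* e.g. a corrupted response payload */
	printf("after failure: seqno=%u vmpck_valid=%d\n", seqno, vmpck_valid);
	return 0;
}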