Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-26 04:25:27 +08:00
8974eb5882
Gather Data Sampling (GDS) is a hardware vulnerability which allows unprivileged speculative access to data which was previously stored in vector registers.

Intel processors that support AVX2 and AVX512 have gather instructions that fetch non-contiguous data elements from memory. On vulnerable hardware, when a gather instruction is transiently executed and encounters a fault, stale data from architectural or internal vector registers may get transiently stored to the destination vector register, allowing an attacker to infer the stale data using typical side channel techniques like cache timing attacks.

This mitigation is different from many earlier ones for two reasons. First, it is enabled by default and a bit must be set to *DISABLE* it. This is the opposite of normal mitigation polarity. This means GDS can be mitigated simply by updating microcode and leaving the new control bit alone.

Second, GDS has a "lock" bit. This lock bit is there because the mitigation affects the hardware security features KeyLocker and SGX. It needs to be enabled and *STAY* enabled for these features to be mitigated against GDS.

The mitigation is enabled in the microcode by default. Disable it by setting gather_data_sampling=off or by disabling all mitigations with mitigations=off. The mitigation status can be checked by reading:

    /sys/devices/system/cpu/vulnerabilities/gather_data_sampling

Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
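
As a quick illustration (not part of the patch), the sysfs attribute named above can be read from userspace to check whether the mitigation is active; a minimal sketch in C, assuming the standard sysfs mount point:

/* Minimal sketch: report the GDS mitigation status via sysfs. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/gather_data_sampling";
	char status[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* kernels without this patch do not expose the file */
		return 1;
	}
	if (fgets(status, sizeof(status), f))
		printf("gather_data_sampling: %s", status);
	fclose(f);
	return 0;
}

On hardware with updated microcode this typically reports a string such as "Mitigation: Microcode".
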
97 lines
2.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_CPU_H
#define ARCH_X86_CPU_H

/* attempt to consolidate cpu attributes */
struct cpu_dev {
	const char	*c_vendor;

	/* some have two possibilities for cpuid string */
	const char	*c_ident[2];

	void		(*c_early_init)(struct cpuinfo_x86 *);
	void		(*c_bsp_init)(struct cpuinfo_x86 *);
	void		(*c_init)(struct cpuinfo_x86 *);
	void		(*c_identify)(struct cpuinfo_x86 *);
	void		(*c_detect_tlb)(struct cpuinfo_x86 *);
	int		c_x86_vendor;
#ifdef CONFIG_X86_32
	/* Optional vendor specific routine to obtain the cache size. */
	unsigned int	(*legacy_cache_size)(struct cpuinfo_x86 *,
					     unsigned int);

	/* Family/stepping-based lookup table for model names. */
	struct legacy_cpu_model_info {
		int		family;
		const char	*model_names[16];
	}		legacy_models[5];
#endif
};

struct _tlb_table {
	unsigned char descriptor;
	char tlb_type;
	unsigned int entries;
	/* unsigned int ways; */
	char info[128];
};
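
/*
 * Illustrative sketch (not part of this header): vendor code builds a
 * descriptor lookup table from struct _tlb_table entries, e.g. the Intel
 * table in intel.c. The TLB_INST_4K identifier and the values below are
 * only an assumed example of what such an entry looks like.
 */
static const struct _tlb_table example_tlb_table[] = {
	{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x00, 0, 0, "" }	/* end-of-table marker */
};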

#define cpu_dev_register(cpu_devX) \
	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
	__section(".x86_cpu_dev.init") = \
	&cpu_devX;

extern const struct cpu_dev *const __x86_cpu_dev_start[],
			    *const __x86_cpu_dev_end[];
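
/*
 * Illustrative sketch (not part of this header): a vendor driver fills in
 * a struct cpu_dev and registers it with cpu_dev_register(), which places
 * the pointer in the .x86_cpu_dev.init section scanned between
 * __x86_cpu_dev_start[] and __x86_cpu_dev_end[]. The "example" identifiers
 * below are hypothetical; the real instances live in intel.c, amd.c, etc.
 */
static void example_c_init(struct cpuinfo_x86 *c)
{
	/* per-CPU setup for this vendor would go here */
}

static const struct cpu_dev example_cpu_dev = {
	.c_vendor	= "Example",
	.c_ident	= { "GenuineExample" },
	.c_init		= example_c_init,
	.c_x86_vendor	= X86_VENDOR_INTEL,	/* each driver uses its own X86_VENDOR_* constant */
};

cpu_dev_register(example_cpu_dev);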

#ifdef CONFIG_CPU_SUP_INTEL
enum tsx_ctrl_states {
	TSX_CTRL_ENABLE,
	TSX_CTRL_DISABLE,
	TSX_CTRL_RTM_ALWAYS_ABORT,
	TSX_CTRL_NOT_SUPPORTED,
};

extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;

extern void __init tsx_init(void);
void tsx_ap_init(void);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */

extern void init_spectral_chicken(struct cpuinfo_x86 *c);

extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_num_cpu_cores(struct cpuinfo_x86 *c);
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
extern int detect_extended_topology(struct cpuinfo_x86 *c);
extern int detect_ht_early(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);

unsigned int aperfmperf_get_khz(int cpu);
void cpu_select_mitigations(void);

extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
extern void update_gds_msr(void);

extern enum spectre_v2_mitigation spectre_v2_enabled;

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return mode == SPECTRE_V2_EIBRS ||
	       mode == SPECTRE_V2_EIBRS_RETPOLINE ||
	       mode == SPECTRE_V2_EIBRS_LFENCE;
}

#endif /* ARCH_X86_CPU_H */