x86/fpu: Rename 'pcntxt_mask' to 'xfeatures_mask'

The name 'pcntxt_mask' is a misnomer: it is essentially meaningless to anyone
who doesn't already know exactly what the variable does.

Name it more descriptively as 'xfeatures_mask'.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Ingo Molnar 2015-04-24 09:20:33 +02:00
parent 69496e10f8
commit 614df7fb8a
4 changed files with 33 additions and 33 deletions

View File

@@ -45,7 +45,7 @@
#endif #endif
extern unsigned int xstate_size; extern unsigned int xstate_size;
extern u64 pcntxt_mask; extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf; extern struct xsave_struct *init_xstate_buf;

View File

@@ -528,7 +528,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
* mxcsr reserved bits must be masked to zero for security reasons. * mxcsr reserved bits must be masked to zero for security reasons.
*/ */
xsave->i387.mxcsr &= mxcsr_feature_mask; xsave->i387.mxcsr &= mxcsr_feature_mask;
xsave->xsave_hdr.xstate_bv &= pcntxt_mask; xsave->xsave_hdr.xstate_bv &= xfeatures_mask;
/* /*
* These bits must be zero. * These bits must be zero.
*/ */

View File

@@ -13,9 +13,9 @@
#include <asm/xcr.h> #include <asm/xcr.h>
/* /*
* Supported feature mask by the CPU and the kernel. * Mask of xstate features supported by the CPU and the kernel:
*/ */
u64 pcntxt_mask; u64 xfeatures_mask;
/* /*
* Represents init state for the supported extended state. * Represents init state for the supported extended state.
@@ -24,7 +24,7 @@ struct xsave_struct *init_xstate_buf;
static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32; static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
static unsigned int *xstate_offsets, *xstate_sizes; static unsigned int *xstate_offsets, *xstate_sizes;
static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8]; static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
static unsigned int xstate_features; static unsigned int xstate_features;
/* /*
@@ -52,7 +52,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
* None of the feature bits are in init state. So nothing else * None of the feature bits are in init state. So nothing else
* to do for us, as the memory layout is up to date. * to do for us, as the memory layout is up to date.
*/ */
if ((xstate_bv & pcntxt_mask) == pcntxt_mask) if ((xstate_bv & xfeatures_mask) == xfeatures_mask)
return; return;
/* /*
@@ -74,7 +74,7 @@ void __sanitize_i387_state(struct task_struct *tsk)
if (!(xstate_bv & XSTATE_SSE)) if (!(xstate_bv & XSTATE_SSE))
memset(&fx->xmm_space[0], 0, 256); memset(&fx->xmm_space[0], 0, 256);
xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2; xstate_bv = (xfeatures_mask & ~xstate_bv) >> 2;
/* /*
* Update all the other memory layouts for which the corresponding * Update all the other memory layouts for which the corresponding
@@ -291,7 +291,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
if (fx_only) if (fx_only)
xsave_hdr->xstate_bv = XSTATE_FPSSE; xsave_hdr->xstate_bv = XSTATE_FPSSE;
else else
xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv); xsave_hdr->xstate_bv &= (xfeatures_mask & xstate_bv);
} }
if (use_fxsr()) { if (use_fxsr()) {
@@ -312,11 +312,11 @@ static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{ {
if (use_xsave()) { if (use_xsave()) {
if ((unsigned long)buf % 64 || fx_only) { if ((unsigned long)buf % 64 || fx_only) {
u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
xrstor_state(init_xstate_buf, init_bv); xrstor_state(init_xstate_buf, init_bv);
return fxrstor_user(buf); return fxrstor_user(buf);
} else { } else {
u64 init_bv = pcntxt_mask & ~xbv; u64 init_bv = xfeatures_mask & ~xbv;
if (unlikely(init_bv)) if (unlikely(init_bv))
xrstor_state(init_xstate_buf, init_bv); xrstor_state(init_xstate_buf, init_bv);
return xrestore_user(buf, xbv); return xrestore_user(buf, xbv);
@@ -439,7 +439,7 @@ static void prepare_fx_sw_frame(void)
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size; fx_sw_reserved.extended_size = size;
fx_sw_reserved.xstate_bv = pcntxt_mask; fx_sw_reserved.xstate_bv = xfeatures_mask;
fx_sw_reserved.xstate_size = xstate_size; fx_sw_reserved.xstate_size = xstate_size;
if (config_enabled(CONFIG_IA32_EMULATION)) { if (config_enabled(CONFIG_IA32_EMULATION)) {
@@ -454,7 +454,7 @@ static void prepare_fx_sw_frame(void)
static inline void xstate_enable(void) static inline void xstate_enable(void)
{ {
cr4_set_bits(X86_CR4_OSXSAVE); cr4_set_bits(X86_CR4_OSXSAVE);
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
} }
/* /*
@@ -465,7 +465,7 @@ static void __init setup_xstate_features(void)
{ {
int eax, ebx, ecx, edx, leaf = 0x2; int eax, ebx, ecx, edx, leaf = 0x2;
xstate_features = fls64(pcntxt_mask); xstate_features = fls64(xfeatures_mask);
xstate_offsets = alloc_bootmem(xstate_features * sizeof(int)); xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
xstate_sizes = alloc_bootmem(xstate_features * sizeof(int)); xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));
@@ -484,7 +484,7 @@ static void __init setup_xstate_features(void)
static void print_xstate_feature(u64 xstate_mask, const char *desc) static void print_xstate_feature(u64 xstate_mask, const char *desc)
{ {
if (pcntxt_mask & xstate_mask) { if (xfeatures_mask & xstate_mask) {
int xstate_feature = fls64(xstate_mask)-1; int xstate_feature = fls64(xstate_mask)-1;
pr_info("x86/fpu: Supporting XSAVE feature %2d: '%s'\n", xstate_feature, desc); pr_info("x86/fpu: Supporting XSAVE feature %2d: '%s'\n", xstate_feature, desc);
@@ -516,7 +516,7 @@ static void print_xstate_features(void)
*/ */
void setup_xstate_comp(void) void setup_xstate_comp(void)
{ {
unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8]; unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
int i; int i;
/* /*
@@ -529,7 +529,7 @@ void setup_xstate_comp(void)
if (!cpu_has_xsaves) { if (!cpu_has_xsaves) {
for (i = 2; i < xstate_features; i++) { for (i = 2; i < xstate_features; i++) {
if (test_bit(i, (unsigned long *)&pcntxt_mask)) { if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
xstate_comp_offsets[i] = xstate_offsets[i]; xstate_comp_offsets[i] = xstate_offsets[i];
xstate_comp_sizes[i] = xstate_sizes[i]; xstate_comp_sizes[i] = xstate_sizes[i];
} }
@@ -540,7 +540,7 @@ void setup_xstate_comp(void)
xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE; xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
for (i = 2; i < xstate_features; i++) { for (i = 2; i < xstate_features; i++) {
if (test_bit(i, (unsigned long *)&pcntxt_mask)) if (test_bit(i, (unsigned long *)&xfeatures_mask))
xstate_comp_sizes[i] = xstate_sizes[i]; xstate_comp_sizes[i] = xstate_sizes[i];
else else
xstate_comp_sizes[i] = 0; xstate_comp_sizes[i] = 0;
@@ -573,8 +573,8 @@ static void __init setup_init_fpu_buf(void)
if (cpu_has_xsaves) { if (cpu_has_xsaves) {
init_xstate_buf->xsave_hdr.xcomp_bv = init_xstate_buf->xsave_hdr.xcomp_bv =
(u64)1 << 63 | pcntxt_mask; (u64)1 << 63 | xfeatures_mask;
init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask; init_xstate_buf->xsave_hdr.xstate_bv = xfeatures_mask;
} }
/* /*
@@ -604,7 +604,7 @@ __setup("eagerfpu=", eager_fpu_setup);
/* /*
* Calculate total size of enabled xstates in XCR0/pcntxt_mask. * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
*/ */
static void __init init_xstate_size(void) static void __init init_xstate_size(void)
{ {
@@ -619,7 +619,7 @@ static void __init init_xstate_size(void)
xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE; xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
for (i = 2; i < 64; i++) { for (i = 2; i < 64; i++) {
if (test_bit(i, (unsigned long *)&pcntxt_mask)) { if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx); cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
xstate_size += eax; xstate_size += eax;
} }
@@ -642,17 +642,17 @@ static void /* __init */ xstate_enable_boot_cpu(void)
} }
cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx); cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
pcntxt_mask = eax + ((u64)edx << 32); xfeatures_mask = eax + ((u64)edx << 32);
if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) { if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", pcntxt_mask); pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
BUG(); BUG();
} }
/* /*
* Support only the state known to OS. * Support only the state known to OS.
*/ */
pcntxt_mask = pcntxt_mask & XCNTXT_MASK; xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
xstate_enable(); xstate_enable();
@@ -661,7 +661,7 @@ static void /* __init */ xstate_enable_boot_cpu(void)
*/ */
init_xstate_size(); init_xstate_size();
update_regset_xstate_info(xstate_size, pcntxt_mask); update_regset_xstate_info(xstate_size, xfeatures_mask);
prepare_fx_sw_frame(); prepare_fx_sw_frame();
setup_init_fpu_buf(); setup_init_fpu_buf();
@@ -669,18 +669,18 @@ static void /* __init */ xstate_enable_boot_cpu(void)
if (cpu_has_xsaveopt && eagerfpu != DISABLE) if (cpu_has_xsaveopt && eagerfpu != DISABLE)
eagerfpu = ENABLE; eagerfpu = ENABLE;
if (pcntxt_mask & XSTATE_EAGER) { if (xfeatures_mask & XSTATE_EAGER) {
if (eagerfpu == DISABLE) { if (eagerfpu == DISABLE) {
pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n", pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
pcntxt_mask & XSTATE_EAGER); xfeatures_mask & XSTATE_EAGER);
pcntxt_mask &= ~XSTATE_EAGER; xfeatures_mask &= ~XSTATE_EAGER;
} else { } else {
eagerfpu = ENABLE; eagerfpu = ENABLE;
} }
} }
pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n", pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
pcntxt_mask, xfeatures_mask,
xstate_size, xstate_size,
cpu_has_xsaves ? "compacted" : "standard"); cpu_has_xsaves ? "compacted" : "standard");
} }
@@ -749,7 +749,7 @@ void __init_refok eager_fpu_init(void)
void *get_xsave_addr(struct xsave_struct *xsave, int xstate) void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{ {
int feature = fls64(xstate) - 1; int feature = fls64(xstate) - 1;
if (!test_bit(feature, (unsigned long *)&pcntxt_mask)) if (!test_bit(feature, (unsigned long *)&xfeatures_mask))
return NULL; return NULL;
return (void *)xsave + xstate_comp_offsets[feature]; return (void *)xsave + xstate_comp_offsets[feature];

View File

@@ -21,7 +21,7 @@
#include <asm/xcr.h> #include <asm/xcr.h>
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/debugreg.h> #include <asm/debugreg.h>
#include <asm/fpu/internal.h> /* pcntxt_mask */ #include <asm/fpu/internal.h> /* xfeatures_mask */
#include <asm/cpu.h> #include <asm/cpu.h>
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
@@ -225,7 +225,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
* restore XCR0 for xsave capable cpu's. * restore XCR0 for xsave capable cpu's.
*/ */
if (cpu_has_xsave) if (cpu_has_xsave)
xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask); xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
fix_processor_context(); fix_processor_context();