x86/cpufeature: Remove cpu_has_pse

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1459266123-21878-11-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Borislav Petkov, 2016-03-29 17:42:03 +02:00
Committed by: Ingo Molnar
commit 16bf92261b (parent c109bf9599)
8 changed files with 9 additions and 10 deletions
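
The change is mechanical across all eight files: the cpu_has_pse convenience macro is deleted from the feature header and every call site tests the feature bit directly with boot_cpu_has(X86_FEATURE_PSE). The sketch below is illustrative only and is not part of this commit: the capability word, the bit value, and the boot_cpu_has() helper are user-space stand-ins so it compiles on its own, whereas the kernel's boot_cpu_has() tests the x86_capability bitmap of boot_cpu_data.

/* Standalone sketch of the conversion pattern -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define X86_FEATURE_PSE 3			/* stand-in feature bit number */

/* stand-in for boot_cpu_data's capability bitmap */
static unsigned long fake_boot_capability = 1UL << X86_FEATURE_PSE;

/* stand-in for the kernel's boot_cpu_has() */
static bool boot_cpu_has(unsigned int feature)
{
	return fake_boot_capability & (1UL << feature);
}

int main(void)
{
	/* before: if (cpu_has_pse) ...                   */
	/* after:  if (boot_cpu_has(X86_FEATURE_PSE)) ... */
	if (boot_cpu_has(X86_FEATURE_PSE))
		printf("PSE available: large pages can be enabled\n");
	else
		printf("PSE not available\n");
	return 0;
}

Open-coding boot_cpu_has() at the call sites changes nothing at run time: as the first hunk shows, the removed macro expanded to exactly that expression, so this is purely a readability and uniformity cleanup.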

@@ -119,7 +119,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)

@@ -183,7 +183,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
 
 static inline int has_transparent_hugepage(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 #ifdef __HAVE_ARCH_PTE_DEVMAP

@@ -157,12 +157,12 @@ static void __init probe_page_size_mask(void)
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
-	if (cpu_has_pse && !debug_pagealloc_enabled())
+	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
 
	/* Enable PSE if available */
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
	/* Enable PGE if available */

@@ -284,7 +284,7 @@ kernel_physical_mapping_init(unsigned long start,
	 */
	mapping_iter = 1;
 
-	if (!cpu_has_pse)
+	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;
 
 repeat:

@@ -1295,7 +1295,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
	struct vmem_altmap *altmap = to_vmem_altmap(start);
	int err;
 
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap) {
		pr_err_once("%s: no cpu support for altmap allocations\n",
@@ -1338,7 +1338,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);
 
-		if (!cpu_has_pse) {
+		if (!boot_cpu_has(X86_FEATURE_PSE)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))

@@ -386,7 +386,7 @@ int __init arch_ioremap_pud_supported(void)
 
 int __init arch_ioremap_pmd_supported(void)
 {
-	return cpu_has_pse;
+	return boot_cpu_has(X86_FEATURE_PSE);
 }
 
 /*

@@ -106,7 +106,7 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
			 * normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
-			if (cpu_has_pse) {
+			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {

@@ -1469,7 +1469,7 @@ static void xen_pvh_set_cr_flags(int cpu)
	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
	 */
-	if (cpu_has_pse)
+	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
	if (boot_cpu_has(X86_FEATURE_PGE))