powerpc/mm: Convert early cpu/mmu feature check to use the new helpers

This switches the early feature checks to use the non-static-key variants of
the helpers. Later patches will convert cpu_has_feature() and
mmu_has_feature() to use static keys, and those can only be used once static
key/jump label support has been initialized. Any feature check that runs
before jump label init must therefore use the new early_*() helpers.
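
For illustration, the non-static-key variants simply test the feature words
that are already populated at this point instead of going through a jump
label. A minimal sketch only, assuming the helpers introduced in the
preceding patch; the real implementations also fold in the compile-time
possible/always feature masks:

	/* Sketch: read the feature bits straight from cur_cpu_spec,
	 * which is valid long before jump_label_init() has run. */
	static inline bool early_cpu_has_feature(unsigned long feature)
	{
		return !!(cur_cpu_spec->cpu_features & feature);
	}

	static inline bool early_mmu_has_feature(unsigned long feature)
	{
		return !!(cur_cpu_spec->mmu_features & feature);
	}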

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit b8f1b4f860 (parent a141cca389)
Author: Aneesh Kumar K.V, 2016-07-23 14:42:35 +05:30, committed by Michael Ellerman

5 changed files with 6 additions and 6 deletions

@@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
 {
-	if (radix_enabled())
+	if (early_radix_enabled())
		return radix__setup_initial_memory_limit(first_memblock_base,
							 first_memblock_size);
	return hash__setup_initial_memory_limit(first_memblock_base,

@@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca)
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied
	 */
-	if (cpu_has_feature(CPU_FTR_HVMODE))
+	if (early_cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_SPRG_HPACA, local_paca);
 #endif
	mtspr(SPRN_SPRG_PACA, local_paca);

@@ -227,8 +227,8 @@ static void __init configure_exceptions(void)
		opal_configure_cores();

		/* Enable AIL if supported, and we are in hypervisor mode */
-		if (cpu_has_feature(CPU_FTR_HVMODE) &&
-		    cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		if (early_cpu_has_feature(CPU_FTR_HVMODE) &&
+		    early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
			unsigned long lpcr = mfspr(SPRN_LPCR);
			mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
		}

@@ -549,7 +549,7 @@ static void __init htab_scan_page_sizes(void)
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
-	if (rc == 0 && mmu_has_feature(MMU_FTR_16M_PAGE)) {
+	if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
		/*
		 * Nothing in the device-tree, but the CPU supports 16M pages,
		 * so let's fallback on a known size list for 16M capable CPUs.

@@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void)
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

-	if (radix_enabled())
+	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();