powerpc fixes for 4.15 #2

Merge tag 'powerpc-4.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "A small batch of fixes, about 50% tagged for stable and the rest for
  recently merged code.

  There's one more fix for the >128T handling on hash. Once a process
  had requested a single mmap above 128T we would then always search
  above 128T. The correct behaviour is to consider the hint address in
  isolation for each mmap request.

  Then a couple of fixes for the IMC PMU, a missing EXPORT_SYMBOL in
  VAS, a fix for STRICT_KERNEL_RWX on 32-bit, and a fix to correctly
  identify P9 DD2.1 but in code that is currently not used by default.

  Thanks to: Aneesh Kumar K.V, Christophe Leroy, Madhavan Srinivasan,
  Sukadev Bhattiprolu"

* tag 'powerpc-4.15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/64s: Fix Power9 DD2.1 logic in DT CPU features
  powerpc/perf: Fix IMC_MAX_PMU macro
  powerpc/perf: Fix pmu_count to count only nest imc pmus
  powerpc: Fix boot on BOOK3S_32 with CONFIG_STRICT_KERNEL_RWX
  powerpc/perf/imc: Use cpu_to_node() not topology_physical_package_id()
  powerpc/vas: Export chip_to_vas_id()
  powerpc/64s/slice: Use addr limit when computing slice mask
commit 83ada03196
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -20,11 +20,6 @@
 #include <linux/io.h>
 #include <asm/opal.h>
 
-/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS 32
-
 /*
  * Compatibility macros for IMC devices
  */
@@ -125,4 +120,5 @@ enum {
 extern int init_imc_pmu(struct device_node *parent,
 			struct imc_pmu *pmu_ptr, int pmu_id);
 extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
 #endif /* __ASM_POWERPC_IMC_PMU_H */
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -735,8 +735,8 @@ static __init void cpufeatures_cpu_quirks(void)
 	 */
 	if ((version & 0xffffff00) == 0x004e0100)
 		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
-	else if ((version & 0xffffefff) == 0x004e0200)
-		cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+	else if ((version & 0xffffefff) == 0x004e0201)
+		cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 }
 
 static void __init cpufeatures_setup_finished(void)
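The quirk above keys off the processor version register (PVR): 0x004e identifies POWER9 and the low bits carry the DD revision (0x0100 for DD1, 0x0200 for DD2.0, 0x0201 for DD2.1), while the 0xffffefff mask ignores one variant bit so chips that differ only in that bit compare equal. The fix stops clearing CPU_FTR_POWER9_DD2_1 on DD2.0 and instead sets it on DD2.1. A minimal user-space sketch of the same comparison; the helper name and the reading of the masked-off bit are assumptions for illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the quirk above: report whether a PVR value
 * identifies a Power9 DD2.1 part.  0xffffefff masks off one variant bit so
 * both chip flavours at that revision match (assumed not to affect the
 * revision itself). */
static int pvr_is_power9_dd2_1(uint32_t pvr)
{
        return (pvr & 0xffffefff) == 0x004e0201;
}

int main(void)
{
        printf("%d\n", pvr_is_power9_dd2_1(0x004e0201)); /* 1: DD2.1 */
        printf("%d\n", pvr_is_power9_dd2_1(0x004e1201)); /* 1: variant bit set, still DD2.1 */
        printf("%d\n", pvr_is_power9_dd2_1(0x004e0200)); /* 0: DD2.0 */
        return 0;
}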
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -21,6 +21,7 @@
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <asm/code-patching.h>
+#include <asm/setup.h>
 
 static int __patch_instruction(unsigned int *addr, unsigned int instr)
 {
@@ -146,11 +147,8 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
 	 * During early early boot patch_instruction is called
	 * when text_poke_area is not ready, but we still need
	 * to allow patching. We just do the plain old patching
-	 * We use slab_is_available and per cpu read * via this_cpu_read
-	 * of text_poke_area. Per-CPU areas might not be up early
-	 * this can create problems with just using this_cpu_read()
	 */
-	if (!slab_is_available() || !this_cpu_read(text_poke_area))
+	if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
		return __patch_instruction(addr, instr);
 
	local_irq_save(flags);
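The new include pulls in asm/setup.h for PTRRELOC(). During very early boot on BOOK3S_32 the kernel may be running at an address different from its link address, so the address of a static variable such as text_poke_area has to be adjusted by the relocation offset before it is dereferenced; PTRRELOC() does that adjustment and becomes a no-op once the kernel runs where it was linked. Reading the pointer through PTRRELOC() therefore works both before and after the patching area is set up, and while it is still NULL the code falls back to plain patching. A rough stand-alone sketch of the idea, with a simplified fixed offset instead of the kernel's real relocation machinery:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's PTRRELOC(): adjust a link-time
 * address by the offset the image actually runs at.  The offset handling
 * here is a simplification (fixed at zero); only the macro's role matches
 * the patch. */
static uintptr_t reloc_offset;  /* 0 once the kernel runs at its link address */

#define PTRRELOC(x) ((__typeof__(x))((uintptr_t)(x) + reloc_offset))

static int *text_poke_area;     /* stands in for the per-CPU poke-area pointer */

int main(void)
{
        int dummy = 1;

        /* Early boot: the area is not set up yet -> fall back to plain patching. */
        if (!*PTRRELOC(&text_poke_area))
                printf("early boot: plain patching\n");

        text_poke_area = &dummy;        /* later: the poke area exists */
        if (*PTRRELOC(&text_poke_area))
                printf("normal path: patch through text_poke_area\n");
        return 0;
}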
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
 	return !slice_area_is_free(mm, start, end - start);
 }
 
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+				unsigned long high_limit)
 {
 	unsigned long i;
 
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 		if (!slice_low_has_vma(mm, i))
 			ret->low_slices |= 1u << i;
 
-	if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+	if (high_limit <= SLICE_LOW_TOP)
 		return;
 
-	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
 		if (!slice_high_has_vma(mm, i))
 			__set_bit(i, ret->high_slices);
 }
 
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+				unsigned long high_limit)
 {
 	unsigned char *hpsizes;
 	int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
 		if (((lpsizes >> (i * 4)) & 0xf) == psize)
 			ret->low_slices |= 1u << i;
 
+	if (high_limit <= SLICE_LOW_TOP)
+		return;
+
 	hpsizes = mm->context.high_slices_psize;
-	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
 		mask_index = i & 0x1;
 		index = i >> 1;
 		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
 			   struct slice_mask mask, struct slice_mask available)
 {
 	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+	/*
+	 * Make sure we just do bit compare only to the max
+	 * addr limit and not the full bit map size.
+	 */
 	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
 
 	bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* First make up a "good" mask of slices that have the right size
 	 * already
 	 */
-	slice_mask_for_size(mm, psize, &good_mask);
+	slice_mask_for_size(mm, psize, &good_mask, high_limit);
 	slice_print_mask(" good_mask", good_mask);
 
 	/*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If we support combo pages, we can allow 64k pages in 4k slices */
 	if (psize == MMU_PAGE_64K) {
-		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
 		if (fixed)
 			slice_or_mask(&good_mask, &compat_mask);
 	}
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 			return newaddr;
 		}
 	}
-
-	/* We don't fit in the good mask, check what other slices are
+	/*
+	 * We don't fit in the good mask, check what other slices are
 	 * empty and thus can be converted
 	 */
-	slice_mask_for_free(mm, &potential_mask);
+	slice_mask_for_free(mm, &potential_mask, high_limit);
 	slice_or_mask(&potential_mask, &good_mask);
 	slice_print_mask(" potential", potential_mask);
 
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 {
 	struct slice_mask mask, available;
 	unsigned int psize = mm->context.user_psize;
+	unsigned long high_limit = mm->context.slb_addr_limit;
 
 	if (radix_enabled())
 		return 0;
 
 	slice_range_to_mask(addr, len, &mask);
-	slice_mask_for_size(mm, psize, &available);
+	slice_mask_for_size(mm, psize, &available, high_limit);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* We need to account for 4k slices too */
 	if (psize == MMU_PAGE_64K) {
 		struct slice_mask compat_mask;
-		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
 		slice_or_mask(&available, &compat_mask);
 	}
 #endif
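This is the >128T fix described in the merge message: the slice masks used to be derived from mm->context.slb_addr_limit, which stays raised once a single mmap above 128TB has been served, so every later request also searched above 128TB. Passing high_limit into the mask helpers lets each request be judged against its own hint. From user space the intended behaviour looks roughly like the sketch below, assuming a powerpc64 hash-MMU system whose default map window is 128TB; the addresses actually returned depend on the process layout:

#include <stdio.h>
#include <sys/mman.h>

/* Only the request that passes a hint at/above the 128TB boundary should be
 * placed above it; a later hint-less request should again stay below 128TB
 * after this fix. */
int main(void)
{
        size_t len = 1UL << 20;
        void *hint = (void *)(1UL << 47);       /* 128TB boundary */

        void *high = mmap(hint, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *low  = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("hinted  mapping: %p\n", high);  /* may be placed >= 128TB */
        printf("default mapping: %p\n", low);   /* expected  < 128TB */
        return 0;
}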
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -26,7 +26,7 @@
  */
 static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
 struct imc_pmu_ref *nest_imc_refc;
 static int nest_pmus;
@@ -286,13 +286,14 @@ static struct imc_pmu_ref *get_nest_pmu_ref(int cpu)
 static void nest_change_cpu_context(int old_cpu, int new_cpu)
 {
 	struct imc_pmu **pn = per_nest_pmu_arr;
-	int i;
 
 	if (old_cpu < 0 || new_cpu < 0)
 		return;
 
-	for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+	while (*pn) {
 		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+		pn++;
+	}
 }
 
 static int ppc_nest_imc_cpu_offline(unsigned int cpu)
@@ -467,7 +468,7 @@ static int nest_imc_event_init(struct perf_event *event)
 	 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
 	 * Get the base memory addresss for this cpu.
 	 */
-	chip_id = topology_physical_package_id(event->cpu);
+	chip_id = cpu_to_chip_id(event->cpu);
 	pcni = pmu->mem_info;
 	do {
 		if (pcni->id == chip_id) {
@@ -524,19 +525,19 @@ static int nest_imc_event_init(struct perf_event *event)
  */
 static int core_imc_mem_init(int cpu, int size)
 {
-	int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
 	 * local node only.
 	 */
-	phys_id = topology_physical_package_id(cpu);
+	nid = cpu_to_node(cpu);
 	mem_info = &core_imc_pmu->mem_info[core_id];
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(phys_id,
+	mem_info->vbase = page_address(alloc_pages_node(nid,
 			  GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 			  __GFP_NOWARN, get_order(size)));
 	if (!mem_info->vbase)
@@ -797,14 +798,14 @@ static int core_imc_event_init(struct perf_event *event)
 static int thread_imc_mem_alloc(int cpu_id, int size)
 {
 	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
-	int phys_id = topology_physical_package_id(cpu_id);
+	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
-		local_mem = page_address(alloc_pages_node(phys_id,
+		local_mem = page_address(alloc_pages_node(nid,
 				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 				__GFP_NOWARN, get_order(size)));
 		if (!local_mem)
@@ -1194,6 +1195,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
 		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
 		kfree(pmu_ptr);
+		kfree(per_nest_pmu_arr);
 		return;
 	}
 
@@ -1218,6 +1220,13 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
 			return -ENOMEM;
 
+		/* Needed for hotplug/migration */
+		if (!per_nest_pmu_arr) {
+			per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+						sizeof(struct imc_pmu *),
+						GFP_KERNEL);
+			if (!per_nest_pmu_arr)
+				return -ENOMEM;
+		}
 		per_nest_pmu_arr[pmu_index] = pmu_ptr;
 		break;
 	case IMC_DOMAIN_CORE:
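Two independent things change in this file. First, per_nest_pmu_arr is no longer a fixed IMC_MAX_PMUS-sized array: it is kcalloc()'d with get_max_nest_dev() + 1 entries, the extra slot staying NULL so that nest_change_cpu_context() can walk it with while (*pn) instead of a bounded for loop. Second, the memory helpers now use cpu_to_node(), which returns the NUMA node id that alloc_pages_node() actually expects, and the event-init path uses cpu_to_chip_id() rather than topology_physical_package_id(). A small stand-alone sketch of the NULL-terminated array pattern; the names are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct pmu { const char *name; };

/* Size the array at runtime and reserve one extra slot as a NULL terminator,
 * so a walker needs no separate element count. */
int main(void)
{
        int max_dev = 3;                        /* stands in for get_max_nest_dev() */
        struct pmu **arr = calloc(max_dev + 1, sizeof(*arr));
        struct pmu a = { "nest_a" }, b = { "nest_b" };

        if (!arr)
                return 1;

        arr[0] = &a;
        arr[1] = &b;                            /* unused slots stay NULL: the terminator */

        for (struct pmu **pn = arr; *pn; pn++)  /* same idea as while (*pn) { ... pn++; } */
                printf("migrate %s\n", (*pn)->name);

        free(arr);
        return 0;
}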
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -153,6 +153,22 @@ static void disable_core_pmu_counters(void)
 	put_online_cpus();
 }
 
+int get_max_nest_dev(void)
+{
+	struct device_node *node;
+	u32 pmu_units = 0, type;
+
+	for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+		if (of_property_read_u32(node, "type", &type))
+			continue;
+
+		if (type == IMC_TYPE_CHIP)
+			pmu_units++;
+	}
+
+	return pmu_units;
+}
+
 static int opal_imc_counters_probe(struct platform_device *pdev)
 {
 	struct device_node *imc_dev = pdev->dev.of_node;
@@ -191,8 +207,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
 			break;
 		}
 
-		if (!imc_pmu_create(imc_dev, pmu_count, domain))
-			pmu_count++;
+		if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+			if (domain == IMC_DOMAIN_NEST)
+				pmu_count++;
+		}
 	}
 
 	return 0;
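In the probe loop, pmu_count doubles as the index into per_nest_pmu_arr, so it must only advance for IMC_DOMAIN_NEST devices; bumping it for core or thread devices as well would leave NULL holes before the terminator, or index past the array that get_max_nest_dev() sized. A toy model of why that matters, with made-up device domains:

#include <stdio.h>

/* If the index advanced for every successfully created PMU, a core device in
 * the middle would leave a NULL hole in the nest array, and a walker that
 * stops at the first NULL would miss the nest PMUs after it. */
enum { NEST, CORE, THREAD };

int main(void)
{
        int domains[] = { NEST, CORE, NEST };
        const char *nest_arr[4] = { 0 };        /* 3 devices max + NULL terminator */
        int pmu_count = 0;

        for (int i = 0; i < 3; i++) {
                /* pretend imc_pmu_create() succeeded ... */
                if (domains[i] == NEST)         /* the fix: only nest devices take a slot */
                        nest_arr[pmu_count++] = "nest pmu";
        }

        for (const char **pn = nest_arr; *pn; pn++)
                printf("%s\n", *pn);            /* prints two entries, no hole */
        return 0;
}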
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -135,6 +135,7 @@ int chip_to_vas_id(int chipid)
 	}
 	return -1;
 }
+EXPORT_SYMBOL(chip_to_vas_id);
 
 static int vas_probe(struct platform_device *pdev)
 {
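This is the missing EXPORT_SYMBOL from the merge message: chip_to_vas_id() was already a global function, but without the export modular code could not link against it. A hedged sketch of a consumer module; the module itself is hypothetical, only chip_to_vas_id() comes from this patch, and its declaration normally comes from the powerpc VAS header rather than being repeated inline:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>

/* Declaration normally provided by the powerpc VAS header; repeated here only
 * to keep the sketch self-contained. */
extern int chip_to_vas_id(int chipid);

static int __init vas_id_demo_init(void)
{
        int vasid = chip_to_vas_id(0);  /* chip 0 is an arbitrary example */

        pr_info("chip 0 -> VAS id %d\n", vasid);
        return vasid < 0 ? -ENODEV : 0;
}

static void __exit vas_id_demo_exit(void)
{
}

module_init(vas_id_demo_init);
module_exit(vas_id_demo_exit);
MODULE_LICENSE("GPL");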