mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-05 18:14:07 +08:00
dfd437a257
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:

 - arm64 support for syscall emulation via PTRACE_SYSEMU{,_SINGLESTEP}
 - Wire up VM_FLUSH_RESET_PERMS for arm64, allowing the core code to manage the permissions of executable vmalloc regions more strictly
 - Slight performance improvement by keeping softirqs enabled while touching the FPSIMD/SVE state (kernel_neon_begin/end)
 - Expose a couple of ARMv8.5 features to user (HWCAP): CondM (new XAFLAG and AXFLAG instructions for floating point comparison flags manipulation) and FRINT (rounding floating point numbers to integers)
 - Re-instate ARM64_PSEUDO_NMI support which was previously marked as BROKEN due to some bugs (now fixed)
 - Improve parking of stopped CPUs and implement an arm64-specific panic_smp_self_stop() to avoid warning on not being able to stop secondary CPUs during panic
 - perf: enable the ARM Statistical Profiling Extensions (SPE) on ACPI platforms
 - perf: DDR performance monitor support for iMX8QXP
 - cache_line_size() can now be set from DT or ACPI/PPTT if provided, to cope with system cache info not exposed via the CPUID registers
 - Avoid warning on hardware cache line size greater than ARCH_DMA_MINALIGN if the system is fully coherent
 - arm64 do_page_fault() and hugetlb cleanups
 - Refactor set_pte_at() to avoid redundant READ_ONCE(*ptep)
 - Ignore ACPI 5.1 FADTs reported as 5.0 (infer from the 'arm_boot_flags' introduced in 5.1)
 - CONFIG_RANDOMIZE_BASE now enabled in defconfig
 - Allow the selection of ARM64_MODULE_PLTS, currently only done via RANDOMIZE_BASE (and an erratum workaround), allowing modules to spill over into the vmalloc area
 - Make ZONE_DMA32 configurable

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (54 commits)
  perf: arm_spe: Enable ACPI/Platform automatic module loading
  arm_pmu: acpi: spe: Add initial MADT/SPE probing
  ACPI/PPTT: Add function to return ACPI 6.3 Identical tokens
  ACPI/PPTT: Modify node flag detection to find last IDENTICAL
  x86/entry: Simplify _TIF_SYSCALL_EMU handling
  arm64: rename dump_instr as dump_kernel_instr
  arm64/mm: Drop [PTE|PMD]_TYPE_FAULT
  arm64: Implement panic_smp_self_stop()
  arm64: Improve parking of stopped CPUs
  arm64: Expose FRINT capabilities to userspace
  arm64: Expose ARMv8.5 CondM capability to userspace
  arm64: defconfig: enable CONFIG_RANDOMIZE_BASE
  arm64: ARM64_MODULES_PLTS must depend on MODULES
  arm64: bpf: do not allocate executable memory
  arm64/kprobes: set VM_FLUSH_RESET_PERMS on kprobe instruction pages
  arm64/mm: wire up CONFIG_ARCH_HAS_SET_DIRECT_MAP
  arm64: module: create module allocations without exec permissions
  arm64: Allow user selection of ARM64_MODULE_PLTS
  acpi/arm64: ignore 5.1 FADTs that are reported as 5.0
  arm64: Allow selecting Pseudo-NMI again
  ...
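Not part of the merge itself, but to make the CondM/FRINT bullet concrete: both features surface to userspace as HWCAP2 bits readable from the auxiliary vector. The probe below is a hedged sketch, assuming arm64 uapi headers new enough (v5.3 or later) to define HWCAP2_FLAGM2 (the ARMv8.5 CondM capability) and HWCAP2_FRINT, and a libc that provides getauxval()/AT_HWCAP2.

/*
 * Hedged sketch (not from this merge): probe the ARMv8.5 CondM and FRINT
 * userspace capabilities via the auxiliary vector. Assumes arm64 uapi
 * headers defining HWCAP2_FLAGM2 and HWCAP2_FRINT (v5.3+) and a libc
 * providing getauxval()/AT_HWCAP2.
 */
#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP2_FLAGM2, HWCAP2_FRINT */

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	printf("flagm2 (CondM: AXFLAG/XAFLAG): %s\n",
	       hwcap2 & HWCAP2_FLAGM2 ? "present" : "absent");
	printf("frint  (FRINT32x/FRINT64x):   %s\n",
	       hwcap2 & HWCAP2_FRINT ? "present" : "absent");
	return 0;
}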
103 lines
2.6 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM64 cacheinfo support
 *
 * Copyright (C) 2015 ARM Ltd.
 * All Rights Reserved
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/of.h>

#define MAX_CACHE_LEVEL			7	/* Max 7 level supported */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

int cache_line_size(void)
{
	if (coherency_max_size != 0)
		return coherency_max_size;

	return cache_line_size_of_cpu();
}
EXPORT_SYMBOL_GPL(cache_line_size);

static inline enum cache_type get_cache_type(int level)
{
	u64 clidr;

	if (level > MAX_CACHE_LEVEL)
		return CACHE_TYPE_NOCACHE;
	clidr = read_sysreg(clidr_el1);
	return CLIDR_CTYPE(clidr, level);
}

static void ci_leaf_init(struct cacheinfo *this_leaf,
			 enum cache_type type, unsigned int level)
{
	this_leaf->level = level;
	this_leaf->type = type;
}

static int __init_cache_level(unsigned int cpu)
{
	unsigned int ctype, level, leaves, fw_level;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
		ctype = get_cache_type(level);
		if (ctype == CACHE_TYPE_NOCACHE) {
			level--;
			break;
		}
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	}

	if (acpi_disabled)
		fw_level = of_find_last_cache_level(cpu);
	else
		fw_level = acpi_find_last_cache_level(cpu);

	if (level < fw_level) {
		/*
		 * some external caches not specified in CLIDR_EL1
		 * the information may be available in the device tree
		 * only unified external caches are considered here
		 */
		leaves += (fw_level - level);
		level = fw_level;
	}

	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

static int __populate_cache_leaves(unsigned int cpu)
{
	unsigned int level, idx;
	enum cache_type type;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		type = get_cache_type(level);
		if (type == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
		} else {
			ci_leaf_init(this_leaf++, type, level);
		}
	}
	return 0;
}

DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
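As a side note on how the CLIDR_CTYPE() decode above behaves: the standalone sketch below replays the same field extraction on a made-up CLIDR_EL1 value (0x123, meaning separate L1 I/D caches plus unified L2 and L3) and counts leaves the same way __init_cache_level() does. The Ctype encodings come from the Arm ARM, and the kernel's enum cache_type is laid out to match them, which is why get_cache_type() can return the raw field directly. Everything here is illustrative userspace code, not part of the kernel file.

/*
 * Hedged, userspace-only sketch: decode a hypothetical CLIDR_EL1 value
 * with the same macros used above and count cache leaves the way
 * __init_cache_level() does. The register value is invented for
 * illustration, not read from hardware.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_CACHE_LEVEL			7
#define CLIDR_CTYPE_SHIFT(level)	(3 * ((level) - 1))
#define CLIDR_CTYPE_MASK(level)		(7ULL << CLIDR_CTYPE_SHIFT(level))
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))

/* Ctype field encodings (Arm ARM): 0 none, 1 I-only, 2 D-only,
 * 3 separate I and D, 4 unified. */
static const char *ctype_name(uint64_t ctype)
{
	switch (ctype) {
	case 0: return "none";
	case 1: return "instruction";
	case 2: return "data";
	case 3: return "separate I+D";
	case 4: return "unified";
	default: return "reserved";
	}
}

int main(void)
{
	uint64_t clidr = 0x123;	/* L1 separate I+D, L2 unified, L3 unified */
	unsigned int level, leaves = 0;

	for (level = 1; level <= MAX_CACHE_LEVEL; level++) {
		uint64_t ctype = CLIDR_CTYPE(clidr, level);

		if (ctype == 0)		/* no cache at this level: stop walking */
			break;
		/* a separate I/D level contributes two leaves, as above */
		leaves += (ctype == 3) ? 2 : 1;
		printf("L%u: %s\n", level, ctype_name(ctype));
	}
	printf("levels=%u leaves=%u\n", level - 1, leaves);
	return 0;
}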