Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile updates from Chris Metcalf:
 "Another grab-bag of miscellaneous changes"

* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: use __ro_after_init instead of tile-specific __write_once
  tile: migrate exception table users off module.h and onto extable.h
  tile: remove #pragma unroll from finv_buffer_remote()
  tile-module: Rename jump labels in module_alloc()
  tile-module: Use kmalloc_array() in module_alloc()
  tile/pci_gx: fix spelling mistake: "delievered" -> "delivered"
commit d9cb5bfcc3
@@ -50,18 +50,15 @@
 /*
  * Originally we used small TLB pages for kernel data and grouped some
- * things together as "write once", enforcing the property at the end
+ * things together as ro-after-init, enforcing the property at the end
  * of initialization by making those pages read-only and non-coherent.
  * This allowed better cache utilization since cache inclusion did not
  * need to be maintained.  However, to do this requires an extra TLB
  * entry, which on balance is more of a performance hit than the
  * non-coherence is a performance gain, so we now just make "read
- * mostly" and "write once" be synonyms.  We keep the attribute
+ * mostly" and "ro-after-init" be synonyms.  We keep the attribute
  * separate in case we change our minds at a future date.
  */
-#define __write_once __read_mostly
-
-/* __ro_after_init is the generic name for the tile arch __write_once. */
 #define __ro_after_init __read_mostly

 #endif /* _ASM_TILE_CACHE_H */
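The hunk above drops the tile-specific __write_once in favor of the generic __ro_after_init attribute: data marked this way is writable only while init code runs and is made read-only afterwards. A minimal sketch of the usage pattern, with hypothetical variable and parameter names not taken from this commit:

#include <linux/cache.h>	/* __ro_after_init */
#include <linux/init.h>

/* Hypothetical example: written exactly once during boot. */
static unsigned long feature_mask __ro_after_init;

static int __init feature_setup(char *str)
{
	/* Still writable here; the section is sealed after init. */
	feature_mask = 1;
	return 0;
}
early_param("feature", feature_setup);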
@@ -19,9 +19,6 @@
 #include <asm-generic/sections.h>

-/* Write-once data is writable only till the end of initialization. */
-extern char __w1data_begin[], __w1data_end[];
-
 extern char vdso_start[], vdso_end[];
 #ifdef CONFIG_COMPAT
 extern char vdso32_start[], vdso32_end[];
@@ -43,29 +43,28 @@ void *module_alloc(unsigned long size)
 	int npages;

 	npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-	pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
 	if (pages == NULL)
 		return NULL;
 	for (; i < npages; ++i) {
 		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
 		if (!pages[i])
-			goto error;
+			goto free_pages;
 	}

 	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
 	if (!area)
-		goto error;
+		goto free_pages;
 	area->nr_pages = npages;
 	area->pages = pages;

 	if (map_vm_area(area, prot_rwx, pages)) {
 		vunmap(area->addr);
-		goto error;
+		goto free_pages;
 	}

 	return area->addr;
-
-error:
+free_pages:
 	while (--i >= 0)
 		__free_page(pages[i]);
 	kfree(pages);
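kmalloc_array(n, size, flags) is the overflow-checked form of kmalloc(n * size, flags): if the multiplication would wrap, it returns NULL instead of handing back an undersized buffer. A minimal sketch of the difference; the helper name is hypothetical:

#include <linux/slab.h>

/* Hypothetical helper: allocate an array of n page pointers. */
static struct page **alloc_page_array(size_t n)
{
	/*
	 * With plain kmalloc(n * sizeof(struct page *), ...), a huge n
	 * can overflow the multiplication and yield a short allocation;
	 * kmalloc_array() detects the overflow and returns NULL.
	 */
	return kmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}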
@@ -57,7 +57,7 @@ static int pci_probe = 1;
  * This flag tells if the platform is TILEmpower that needs
  * special configuration for the PLX switch chip.
  */
-int __write_once tile_plx_gen1;
+int __ro_after_init tile_plx_gen1;

 static struct pci_controller controllers[TILE_NUM_PCIE];
 static int num_controllers;
@@ -131,7 +131,7 @@ static int tile_irq_cpu(int irq)

 	count = cpumask_weight(&intr_cpus_map);
 	if (unlikely(count == 0)) {
-		pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
+		pr_warn("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
 		return irq % (smp_height * smp_width);
 	}
@@ -49,7 +49,7 @@
 static inline int ABS(int x) { return x >= 0 ? x : -x; }

 /* Chip information */
-char chip_model[64] __write_once;
+char chip_model[64] __ro_after_init;

 #ifdef CONFIG_VT
 struct screen_info screen_info;
@@ -97,17 +97,17 @@ int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };
 #ifdef CONFIG_HIGHMEM
 /* Map information from VAs to PAs */
 unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
-  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+  __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(pbase_map);

 /* Map information from PAs to VAs */
 void *vbase_map[NR_PA_HIGHBIT_VALUES]
-  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+  __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(vbase_map);
 #endif

 /* Node number as a function of the high PA bits */
-int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
+int highbits_to_node[NR_PA_HIGHBIT_VALUES] __ro_after_init;
 EXPORT_SYMBOL(highbits_to_node);

 static unsigned int __initdata maxmem_pfn = -1U;
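The aligned(L2_CACHE_BYTES) attribute on these boot-time tables keeps each one on its own cache-line boundary, so it never shares a line with adjacent writable data. Arch-independent code spells the same pattern with the generic helper from <linux/cache.h>; a one-line sketch with a hypothetical table name:

#include <linux/cache.h>

/* Hypothetical lookup table: cache-line aligned, sealed after boot. */
static int node_lookup[64] ____cacheline_aligned __ro_after_init;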
@@ -844,11 +844,11 @@ static void __init zone_sizes_init(void)
 #ifdef CONFIG_NUMA

 /* which logical CPUs are on which nodes */
-struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
+struct cpumask node_2_cpu_mask[MAX_NUMNODES] __ro_after_init;
 EXPORT_SYMBOL(node_2_cpu_mask);

 /* which node each logical CPU is on */
-char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
+char cpu_2_node[NR_CPUS] __ro_after_init __attribute__((aligned(L2_CACHE_BYTES)));
 EXPORT_SYMBOL(cpu_2_node);

 /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
@@ -1269,7 +1269,7 @@ static void __init validate_va(void)
  * cpus plus any other cpus that are willing to share their cache.
  * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
  */
-struct cpumask __write_once cpu_lotar_map;
+struct cpumask __ro_after_init cpu_lotar_map;
 EXPORT_SYMBOL(cpu_lotar_map);

 /*
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hash_for_home_map);
  * cache, those tiles will only appear in cpu_lotar_map, NOT in
  * cpu_cacheable_map, as they are a special case.
  */
-struct cpumask __write_once cpu_cacheable_map;
+struct cpumask __ro_after_init cpu_cacheable_map;
 EXPORT_SYMBOL(cpu_cacheable_map);

 static __initdata struct cpumask disabled_map;
@@ -1506,7 +1506,7 @@ void __init setup_arch(char **cmdline_p)
  * Set up per-cpu memory.
  */

-unsigned long __per_cpu_offset[NR_CPUS] __write_once;
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init;
 EXPORT_SYMBOL(__per_cpu_offset);

 static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
@@ -27,7 +27,7 @@
  * We write to width and height with a single store in head_NN.S,
  * so make the variable aligned to "long".
  */
-HV_Topology smp_topology __write_once __aligned(sizeof(long));
+HV_Topology smp_topology __ro_after_init __aligned(sizeof(long));
 EXPORT_SYMBOL(smp_topology);

 #if CHIP_HAS_IPI()
@@ -37,7 +37,7 @@
  */

 /* How many cycles per second we are running at. */
-static cycles_t cycles_per_sec __write_once;
+static cycles_t cycles_per_sec __ro_after_init;

 cycles_t get_clock_rate(void)
 {
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(get_cycles);
  */
 #define SCHED_CLOCK_SHIFT 10

-static unsigned long sched_clock_mult __write_once;
+static unsigned long sched_clock_mult __ro_after_init;

 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
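sched_clock_mult is a fixed-point scale factor computed once at boot, which is what makes it a natural __ro_after_init candidate. With SCHED_CLOCK_SHIFT of 10, a raw cycle count converts to nanoseconds as (cycles * mult) >> 10, the usual clocksource mult/shift idiom. A hedged sketch of that arithmetic; the function name is illustrative, not from this file:

#define SCHED_CLOCK_SHIFT 10

/* Convert a raw cycle count to nanoseconds using a 10-bit
 * fixed-point multiplier.
 */
static inline unsigned long long cycles_to_ns(unsigned long long cycles,
					      unsigned long mult)
{
	return (cycles * mult) >> SCHED_CLOCK_SHIFT;
}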
@@ -22,7 +22,7 @@
 #include <linux/mman.h>
 #include <linux/types.h>
 #include <linux/err.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/compat.h>
 #include <linux/prctl.h>
 #include <asm/cacheflush.h>
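This and the two include hunks below are the extable.h migration: these files only use the exception-table lookup API, and module.h drags in a much larger header graph than the lightweight extable.h. A minimal sketch of the interface the migrated files rely on; the wrapper function is hypothetical:

#include <linux/extable.h>

/* Hypothetical wrapper: does a faulting PC have a fixup entry? */
static bool pc_has_fixup(unsigned long pc)
{
	/* search_exception_tables() is declared in <linux/extable.h>. */
	return search_exception_tables(pc) != NULL;
}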
@@ -138,19 +138,13 @@ finv_buffer_remote(void *buffer, size_t size, int hfh)
 	if ((unsigned long)base < (unsigned long)buffer)
 		base = buffer;

-	/*
-	 * Fire all the loads we need.  The MAF only has eight entries
-	 * so we can have at most eight outstanding loads, so we
-	 * unroll by that amount.
-	 */
-#pragma unroll 8
+	/* Fire all the loads we need. */
 	for (; p >= base; p -= step_size)
 		force_load(p);

 	/*
 	 * Repeat, but with finv's instead of loads, to get rid of the
 	 * data we just loaded into our own cache and the old home L3.
-	 * No need to unroll since finv's don't target a register.
 	 * The finv's are guaranteed not to actually flush the data in
 	 * the buffer back to their home, since we just read it, so the
 	 * lines are clean in cache; we will only invalidate those lines.
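The removed #pragma unroll was an extension of Tilera's proprietary compiler; plain GCC does not recognize it, so the hint is dropped and unrolling is left to the compiler's own heuristics. For comparison only, GCC 8 and later offer an equivalent hint; this sketch is not part of the commit:

/* GCC 8+: request 8-way unrolling of the following loop. */
static long sum_strided(const long *buf, int n, int step)
{
	long sum = 0;
	int i;

#pragma GCC unroll 8
	for (i = 0; i < n; i += step)
		sum += buf[i];
	return sum;
}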
@@ -12,7 +12,7 @@
  * more details.
  */

-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
@@ -29,7 +29,7 @@
 #include <linux/tty.h>
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/hugetlb.h>
 #include <linux/syscalls.h>
@@ -47,7 +47,7 @@
  * The noallocl2 option suppresses all use of the L2 cache to cache
  * locally from a remote home.
  */
-static int __write_once noallocl2;
+static int __ro_after_init noallocl2;
 static int __init set_noallocl2(char *str)
 {
 	noallocl2 = 1;
@@ -190,9 +190,9 @@ static void __init page_table_range_init(unsigned long start,

 static int __initdata ktext_hash = 1;	/* .text pages */
 static int __initdata kdata_hash = 1;	/* .data and .bss pages */
-int __write_once hash_default = 1;	/* kernel allocator pages */
+int __ro_after_init hash_default = 1;	/* kernel allocator pages */
 EXPORT_SYMBOL(hash_default);
-int __write_once kstack_hash = 1;	/* if no homecaching, use h4h */
+int __ro_after_init kstack_hash = 1;	/* if no homecaching, use h4h */

 /*
  * CPUs to use to for striping the pages of kernel data.  If hash-for-home
@@ -203,7 +203,7 @@ int __write_once kstack_hash = 1;	/* if no homecaching, use h4h */
 static __initdata struct cpumask kdata_mask;
 static __initdata int kdata_arg_seen;

-int __write_once kdata_huge;	/* if no homecaching, small pages */
+int __ro_after_init kdata_huge;	/* if no homecaching, small pages */


 /* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
@@ -896,8 +896,8 @@ void __init pgtable_cache_init(void)
 		panic("pgtable_cache_init(): Cannot create pgd cache");
 }

-static long __write_once initfree = 1;
-static bool __write_once set_initfree_done;
+static long __ro_after_init initfree = 1;
+static bool __ro_after_init set_initfree_done;

 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)