2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-19 18:53:52 +08:00

xtensa/mm/highmem: Make generic kmap_atomic() work correctly

The conversion to the generic kmap_atomic() implementation missed the fact
that xtensa's fixmap works bottom up while all other implementations work
top down. There is no real reason why xtensa needs to work that way.

Cure it by:

  - Using the generic fix_to_virt()/virt_to_fix() functions which work top
    down
  - Adjusting the mapping defines
  - Using the generic index calculation for the non cache aliasing case
  - Making the cache colour offset reverse so the effective index is correct

While at it, remove the outdated and misleading comment above the fixmap
enum, which originates from the initial copy & paste of this code from i386.

[ Max: Fixed the off by one in the index calculation ]

Fixes: 629ed3f7da ("xtensa/mm/highmem: Switch to generic kmap atomic")
Reported-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Max Filippov <jcmvbkbc@gmail.com>
Link: https://lore.kernel.org/r/20201116193253.23875-1-jcmvbkbc@gmail.com
This commit is contained in:
Thomas Gleixner 2020-11-16 11:32:53 -08:00
parent 2a656cad33
commit 1eb0616c2d
5 changed files with 31 additions and 64 deletions

View File

@@ -17,63 +17,22 @@
#include <linux/threads.h>
#include <linux/pgtable.h>
#include <asm/kmap_size.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the start of the consistent memory region upwards.
* Also this lets us do fail-safe vmalloc(), we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* these 'compile-time allocated' memory buffers are
* fixed-size 4k pages. (or larger if used with an increment
* higher than 1) use fixmap_set(idx,phys) to associate
* physical memory with fixmap indices.
*/
/* The map slots for temporary mappings via kmap_atomic/local(). */
enum fixed_addresses {
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN,
/* One slot per kmap type, per possible CPU, per D-cache color. */
FIX_KMAP_END = FIX_KMAP_BEGIN +
(KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
#endif
__end_of_fixed_addresses
};
#define FIXADDR_TOP (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_END (XCHAL_KSEG_CACHED_VADDR - PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
/* Enforce that FIXADDR_START is PMD aligned to handle cache aliasing */
#define FIXADDR_START ((FIXADDR_END - FIXADDR_SIZE) & PMD_MASK)
#define FIXADDR_TOP (FIXADDR_START + FIXADDR_SIZE - PAGE_SIZE)
#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT)
#ifndef __ASSEMBLY__
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
	/*
	 * Compile-time sanity: the fixmap must not overlap the TLB temp
	 * mapping area, and the index must be within the fixmap range.
	 */
	BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
	return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
	/* The address must lie inside the fixmap window. */
	BUG_ON(vaddr < FIXADDR_START || vaddr >= FIXADDR_TOP);
	return __virt_to_fix(vaddr);
}
#endif
#include <asm-generic/fixmap.h>
#endif /* CONFIG_HIGHMEM */
#endif

View File

@@ -12,6 +12,7 @@
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
#ifdef CONFIG_HIGHMEM
#include <linux/wait.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
@@ -58,6 +59,13 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
return pkmap_map_wait_arr + color;
}
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
#define arch_kmap_local_map_idx kmap_local_map_idx
enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
#endif
extern pte_t *pkmap_page_table;
@@ -67,15 +75,10 @@ static inline void flush_cache_kmaps(void)
flush_cache_all();
}
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
#define arch_kmap_local_map_idx kmap_local_map_idx
enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
#define arch_kmap_local_post_unmap(vaddr) \
local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
void kmap_init(void);
#endif /* CONFIG_HIGHMEM */
#endif

View File

@@ -23,16 +23,16 @@ static void __init kmap_waitqueues_init(void)
for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i)
init_waitqueue_head(pkmap_map_wait_arr + i);
}
#else
static inline void kmap_waitqueues_init(void)
{
}
#endif
/*
 * Map a (kmap type, cache color) pair to a fixmap index.
 *
 * NOTE(review): the rendered diff had merged the removed and added lines,
 * leaving a stale bottom-up "return" first and the corrected computation
 * as unreachable dead code. Keep only the corrected implementation.
 */
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	int idx = (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS;

	/*
	 * The fixmap operates top down, so the color offset needs to be
	 * reverse as well.
	 */
	return idx + DCACHE_N_COLORS - 1 - color;
}
enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
@@ -45,6 +45,10 @@ enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
return kmap_idx(type, DCACHE_ALIAS(addr));
}
#else
static inline void kmap_waitqueues_init(void) { }
#endif
void __init kmap_init(void)
{
/* Check if this memory layout is broken because PKMAP overlaps

View File

@@ -147,8 +147,8 @@ void __init mem_init(void)
#ifdef CONFIG_HIGHMEM
PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
(LAST_PKMAP*PAGE_SIZE) >> 10,
FIXADDR_START, FIXADDR_TOP,
(FIXADDR_TOP - FIXADDR_START) >> 10,
FIXADDR_START, FIXADDR_END,
(FIXADDR_END - FIXADDR_START) >> 10,
#endif
PAGE_OFFSET, PAGE_OFFSET +
(max_low_pfn - min_low_pfn) * PAGE_SIZE,

View File

@@ -52,7 +52,8 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
/*
 * Set up the page tables covering the fixmap region.
 *
 * NOTE(review): the rendered diff had merged the removed line
 * (init_pmd(__fix_to_virt(0), ...)) with the new body, producing a
 * duplicate init_pmd call. Keep only the post-commit body.
 */
static void __init fixedrange_init(void)
{
	/* Enforce that the fixmap does not overlap the TLB temp mappings. */
	BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif