sh: uncached mapping helpers.

This adds some helper routines for uncached mapping support. This
simplifies some of the cases where we need to check the uncached mapping
boundaries, in addition to giving us a centralized location for building
more complex manipulations on top of.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Author: Paul Mundt, 2010-02-17 16:28:00 +09:00
parent 51becfd962
commit 9edef28653
6 changed files with 58 additions and 30 deletions
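
The pattern these helpers centralize is visible in the profile_pc() hunk
below. In sketch form (illustrative only; both fragments use symbols that
appear elsewhere in this diff):

	/* Before: each call site open-codes the window arithmetic. */
	if ((addr >= (memory_start + cached_to_uncached)) &&
	    (addr < (memory_start + cached_to_uncached + uncached_size)))
		addr -= cached_to_uncached;

	/* After: the mm code owns the boundary knowledge. */
	if (virt_addr_uncached(addr))
		addr = CAC_ADDR(addr);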

arch/sh/include/asm/page.h

@@ -50,13 +50,22 @@ extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;
#ifdef CONFIG_UNCACHED_MAPPING
extern unsigned long uncached_start, uncached_end;
extern int virt_addr_uncached(unsigned long kaddr);
extern void uncached_init(void);
#else
#define virt_addr_uncached(kaddr) (0)
#define uncached_init() do { } while (0)
#endif
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
return (addr1 ^ addr2) & shm_align_mask;
}
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
@@ -135,6 +144,14 @@ typedef struct page *pgtable_t;
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
#ifdef CONFIG_UNCACHED_MAPPING
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr) ((addr) - uncached_start + PAGE_OFFSET)
#else
#define UNCAC_ADDR(addr) ((addr))
#define CAC_ADDR(addr) ((addr))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
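
As a worked example of the macro arithmetic (hypothetical addresses:
PAGE_OFFSET = 0x80000000 and uncached_start = 0x88000000, neither of which
is fixed by this diff):

	unsigned long cached   = 0x80002000;         /* lowmem (P1) address */
	unsigned long uncached = UNCAC_ADDR(cached); /* -> 0x88002000 */
	BUG_ON(CAC_ADDR(uncached) != cached);        /* the macros are exact inverses */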

arch/sh/include/asm/ptrace.h

@@ -139,15 +139,8 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
#ifdef CONFIG_UNCACHED_MAPPING
/*
* If PC points in to the uncached mapping, fix it up and hand
* back the cached equivalent.
*/
if ((pc >= (memory_start + cached_to_uncached)) &&
(pc < (memory_start + cached_to_uncached + uncached_size)))
pc -= cached_to_uncached;
#endif
if (virt_addr_uncached(pc))
return CAC_ADDR(pc);
return pc;
}
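
With the same hypothetical layout (uncached_start = 0x88000000,
uncached_end = 0x89000000, PAGE_OFFSET = 0x80000000), a sampled pc of
0x88001234 satisfies virt_addr_uncached() and is handed back as
CAC_ADDR(0x88001234) = 0x80001234, so profile hits land on the cached
text address.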

arch/sh/kernel/head_32.S

@@ -152,6 +152,7 @@ ENTRY(_stext)
mov #0, r10
#ifdef CONFIG_UNCACHED_MAPPING
/*
* Uncached mapping
*/
@@ -171,6 +172,7 @@ ENTRY(_stext)
add r4, r1
add r4, r3
add #1, r10
#endif
/*
* Iterate over all of the available sizes from largest to
@@ -216,6 +218,7 @@ ENTRY(_stext)
__PMB_ITER_BY_SIZE(64)
__PMB_ITER_BY_SIZE(16)
#ifdef CONFIG_UNCACHED_MAPPING
/*
* Now that we can access it, update cached_to_uncached and
* uncached_size.
@ -228,6 +231,7 @@ ENTRY(_stext)
shll16 r7
shll8 r7
mov.l r7, @r0
#endif
/*
* Clear the remaining PMB entries.
@@ -306,7 +310,9 @@ ENTRY(stack_start)
.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V
.LMMUCR: .long MMUCR
.LMEMORY_SIZE: .long __MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached: .long cached_to_uncached
.Luncached_size: .long uncached_size
.LMEMORY_SIZE: .long __MEMORY_SIZE
#endif
#endif
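
In rough C terms, the uncached_size fixup above computes the following
(a sketch: the register roles are inferred from this hunk, and r0 is
assumed to hold the address loaded from .Luncached_size):

	/* r10 counted the PMB entries used for the uncached mapping;
	 * shll16 followed by shll8 shifts it left by 24 bits, i.e.
	 * multiplies the entry count by 16MB. */
	uncached_size = r10 << 24;	/* entries * 16MB */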

arch/sh/mm/Makefile

@@ -36,6 +36,7 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
# Special flags for fault_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.

arch/sh/mm/init.c

@@ -26,21 +26,6 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];
#ifdef CONFIG_UNCACHED_MAPPING
/*
* This is the offset of the uncached section from its cached alias.
*
* Legacy platforms handle trivial transitions between cached and
* uncached segments by making use of the 1:1 mapping relationship in
* 512MB lowmem, others via a special uncached mapping.
*
* Default value only valid in 29 bit mode, in 32bit mode this will be
* updated by the early PMB initialization code.
*/
unsigned long cached_to_uncached = 0x20000000;
unsigned long uncached_size = SZ_512M;
#endif
#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
@@ -260,7 +245,7 @@ void __init mem_init(void)
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
/* Initialize the vDSO */
uncached_init();
vsyscall_init();
codesize = (unsigned long) &_etext - (unsigned long) &_text;
@@ -303,9 +288,7 @@ void __init mem_init(void)
((unsigned long)high_memory - (unsigned long)memory_start) >> 20,
#ifdef CONFIG_UNCACHED_MAPPING
(unsigned long)memory_start + cached_to_uncached,
(unsigned long)memory_start + cached_to_uncached + uncached_size,
uncached_size >> 20,
uncached_start, uncached_end, uncached_size >> 20,
#endif
(unsigned long)&__init_begin, (unsigned long)&__init_end,

arch/sh/mm/uncached.c (new file)

@@ -0,0 +1,28 @@
#include <linux/init.h>
#include <asm/sizes.h>
#include <asm/page.h>
/*
* This is the offset of the uncached section from its cached alias.
*
* Legacy platforms handle trivial transitions between cached and
* uncached segments by making use of the 1:1 mapping relationship in
* 512MB lowmem, others via a special uncached mapping.
*
* Default value only valid in 29 bit mode, in 32bit mode this will be
* updated by the early PMB initialization code.
*/
unsigned long cached_to_uncached = SZ_512M;
unsigned long uncached_size = SZ_512M;
unsigned long uncached_start, uncached_end;
int virt_addr_uncached(unsigned long kaddr)
{
return (kaddr >= uncached_start) && (kaddr < uncached_end);
}
void __init uncached_init(void)
{
uncached_start = memory_end;
uncached_end = uncached_start + uncached_size;
}
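
A minimal usage sketch (hypothetical caller; assumes the buffer's offset
into lowmem falls inside the window the boot code mapped, and UNCAC_ADDR
comes from the page.h hunk above):

	/* uncached_init() runs from mem_init() (see the init.c hunk), so the
	 * window bounds are valid by the time normal allocations happen. */
	void *buf = kmalloc(64, GFP_KERNEL);                   /* cached alias   */
	unsigned long ualias = UNCAC_ADDR((unsigned long)buf); /* uncached alias */
	BUG_ON(!virt_addr_uncached(ualias)); /* inside [uncached_start, uncached_end) */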