[POWERPC] Remove and replace uses of PPC_MEMSTART with memstart_addr
A number of users of PPC_MEMSTART (40x, ppc_mmu_32) can just always use
0 as we don't support booting these kernels at non-zero physical addresses
since their exception vectors must be at 0 (or 0xfffx_xxxx).

For the sub-arches that support relocatable interrupt vectors (book-e), it's
reasonable to have memory start at a non-zero physical address.  For those
cases use the variable memstart_addr instead of the #define PPC_MEMSTART
since the only uses of PPC_MEMSTART are for initialization and in the future
we can set memstart_addr at runtime to have a relocatable kernel.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
commit 99c62dd773
parent 1993cbf4ae
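The practical difference the patch is after: a compile-time #define PPC_MEMSTART freezes the physical start of RAM into the kernel image, while a phys_addr_t memstart_addr variable can be filled in at boot, which is what a relocatable kernel needs. Below is a minimal standalone sketch of that idea only; it is not the kernel's actual __va()/__pa() implementation, and the *_sketch names and constants are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET_SKETCH 0xc0000000UL   /* kernel virtual base used on 32-bit powerpc */

/* Runtime-settable physical start of RAM, the role memstart_addr plays in the
 * patch.  The old PPC_MEMSTART #define fixed this value when the kernel was built. */
static uint32_t memstart_sketch;

/* Illustrative linear-map conversions for RAM that may start at a
 * non-zero physical address (e.g. relocatable Book-E kernels). */
static unsigned long phys_to_virt_sketch(uint32_t p)
{
        return PAGE_OFFSET_SKETCH + (p - memstart_sketch);
}

static uint32_t virt_to_phys_sketch(unsigned long v)
{
        return (uint32_t)(v - PAGE_OFFSET_SKETCH) + memstart_sketch;
}

int main(void)
{
        memstart_sketch = 0x20000000;      /* discovered at boot, not compiled in */
        printf("phys 0x%08x -> virt 0x%08lx\n", 0x20100000u,
               phys_to_virt_sketch(0x20100000u));
        printf("virt 0x%08lx -> phys 0x%08x\n", 0xc0100000UL,
               virt_to_phys_sketch(0xc0100000UL));
        return 0;
}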
@@ -97,7 +97,7 @@ unsigned long __init mmu_mapin_ram(void)
 	phys_addr_t p;
 
 	v = KERNELBASE;
-	p = PPC_MEMSTART;
+	p = 0;
 	s = total_lowmem;
 
 	if (__map_without_ltlbs)
@@ -53,13 +53,12 @@
 #include <asm/machdep.h>
 #include <asm/setup.h>
 
+#include "mmu_decl.h"
+
 extern void loadcam_entry(unsigned int index);
 unsigned int tlbcam_index;
 unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
-extern unsigned long total_lowmem;
-extern unsigned long __max_low_memory;
-extern unsigned long __initial_memory_limit;
 #define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE
 
 #define NUM_TLBCAMS	(16)
@@ -165,15 +164,15 @@ void invalidate_tlbcam_entry(int index)
 void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1,
 		unsigned long cam2)
 {
-	settlbcam(0, PAGE_OFFSET, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0);
+	settlbcam(0, PAGE_OFFSET, memstart_addr, cam0, _PAGE_KERNEL, 0);
 	tlbcam_index++;
 	if (cam1) {
 		tlbcam_index++;
-		settlbcam(1, PAGE_OFFSET+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0);
+		settlbcam(1, PAGE_OFFSET+cam0, memstart_addr+cam0, cam1, _PAGE_KERNEL, 0);
 	}
 	if (cam2) {
 		tlbcam_index++;
-		settlbcam(2, PAGE_OFFSET+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0);
+		settlbcam(2, PAGE_OFFSET+cam0+cam1, memstart_addr+cam0+cam1, cam2, _PAGE_KERNEL, 0);
 	}
 }
 
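In the cam_mapin_ram() hunk above, the CAM windows tile lowmem back to back: window N maps PAGE_OFFSET plus the sizes of the earlier windows onto memstart_addr plus the same offset. A hedged sketch of that address arithmetic follows; the helper and its arguments are hypothetical and the memory-start value is made up for the example, none of it is kernel code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET_SKETCH 0xc0000000UL

/* Hypothetical helper: compute the virtual and physical base of CAM window n
 * (0, 1 or 2) given the window sizes cam[0..2] and the start of RAM. */
static void cam_window_base_sketch(int n, const unsigned long cam[3],
                                   uint32_t memstart, unsigned long *vbase,
                                   uint32_t *pbase)
{
        unsigned long off = 0;
        int i;

        for (i = 0; i < n; i++)
                off += cam[i];          /* earlier windows sit below this one */

        *vbase = PAGE_OFFSET_SKETCH + off;
        *pbase = memstart + off;
}

int main(void)
{
        unsigned long cam[3] = { 64 << 20, 64 << 20, 64 << 20 };  /* 3 x 64 MB */
        unsigned long v;
        uint32_t p;
        int n;

        for (n = 0; n < 3; n++) {
                /* pretend RAM starts at physical 256 MB (hypothetical value) */
                cam_window_base_sketch(n, cam, 0x10000000, &v, &p);
                printf("CAM%d: virt 0x%08lx -> phys 0x%08x\n", n, v, p);
        }
        return 0;
}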
@@ -59,8 +59,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 unsigned long total_memory;
 unsigned long total_lowmem;
 
-unsigned long ppc_memstart;
-unsigned long ppc_memoffset = PAGE_OFFSET;
+phys_addr_t memstart_addr;
+phys_addr_t lowmem_end_addr;
 
 int boot_mapsize;
 #ifdef CONFIG_PPC_PMAC
@@ -145,8 +145,7 @@ void __init MMU_init(void)
 		printk(KERN_WARNING "Only using first contiguous memory region");
 	}
 
-	total_memory = lmb_end_of_DRAM();
-	total_lowmem = total_memory;
+	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
 
 #ifdef CONFIG_FSL_BOOKE
 	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
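A worked example of the MMU_init() change above, assuming lmb_end_of_DRAM() still returns the absolute end address of the last memory region: on a board whose RAM starts at physical 0x20000000 with 256 MB installed, lmb_end_of_DRAM() returns 0x30000000. The old code stored that absolute end address in total_memory; the new expression stores 0x30000000 - 0x20000000 = 0x10000000, so total_memory and total_lowmem stay sizes rather than end addresses once RAM can start above zero.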
@@ -51,6 +51,7 @@ extern unsigned long __max_low_memory;
 extern unsigned long __initial_memory_limit;
 extern unsigned long total_memory;
 extern unsigned long total_lowmem;
+extern phys_addr_t memstart_addr;
 
 /* ...and now those things that may be slightly different between processor
  * architectures. -- Dan
@@ -281,12 +281,13 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
  */
 void __init mapin_ram(void)
 {
-	unsigned long v, p, s, f;
+	unsigned long v, s, f;
+	phys_addr_t p;
 	int ktext;
 
 	s = mmu_mapin_ram();
 	v = KERNELBASE + s;
-	p = PPC_MEMSTART + s;
+	p = memstart_addr + s;
 	for (; s < total_lowmem; s += PAGE_SIZE) {
 		ktext = ((char *) v >= _stext && (char *) v < etext);
 		f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM;
@@ -82,7 +82,6 @@ unsigned long __init mmu_mapin_ram(void)
 #else
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
-	unsigned long align;
 
 	if (__map_without_bats) {
 		printk(KERN_DEBUG "RAM mapped without BATs\n");
@@ -93,19 +92,13 @@ unsigned long __init mmu_mapin_ram(void)
 
 	/* Make sure we don't map a block larger than the
 	   smallest alignment of the physical address. */
-	/* alignment of PPC_MEMSTART */
-	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
-	/* set BAT block size to MIN(max_size, align) */
-	if (align && align < max_size)
-		max_size = align;
-
 	tot = total_lowmem;
 	for (bl = 128<<10; bl < max_size; bl <<= 1) {
 		if (bl * 2 > tot)
 			break;
 	}
 
-	setbat(2, KERNELBASE, PPC_MEMSTART, bl, _PAGE_RAM);
+	setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
 	done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
 	if ((done < tot) && !bat_addrs[3].limit) {
 		/* use BAT3 to cover a bit more */
@@ -113,7 +106,7 @@ unsigned long __init mmu_mapin_ram(void)
 		for (bl = 128<<10; bl < max_size; bl <<= 1)
 			if (bl * 2 > tot)
 				break;
-		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_RAM);
+		setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
 		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
 	}
 
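The two setbat() hunks above keep the existing sizing loop, which grows bl from 128 KB and stops as soon as doubling it would exceed total_lowmem (or hit the 256 MB cap), i.e. it picks the largest power-of-two block that still fits. A small standalone check of that behaviour, a sketch only; the constants mirror the code above but nothing here is kernel API:

#include <stdio.h>

/* Reproduce the BAT block-size selection from mmu_mapin_ram():
 * grow bl from 128 KB until doubling it would exceed tot (or reach max_size). */
static unsigned long bat_block_size_sketch(unsigned long tot, unsigned long max_size)
{
        unsigned long bl;

        for (bl = 128 << 10; bl < max_size; bl <<= 1) {
                if (bl * 2 > tot)
                        break;
        }
        return bl;
}

int main(void)
{
        unsigned long max_size = 256 << 20;   /* 256 MB cap, as in the code */

        /* 192 MB of lowmem -> a 128 MB BAT2, leaving 64 MB for BAT3 */
        printf("tot=192MB -> bl=%lu MB\n", bat_block_size_sketch(192 << 20, max_size) >> 20);
        /* 64 MB of lowmem -> a single 64 MB BAT2 covers everything */
        printf("tot=64MB  -> bl=%lu MB\n", bat_block_size_sketch(64 << 20, max_size) >> 20);
        return 0;
}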
@@ -3,8 +3,6 @@
 
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32
 
-#define PPC_MEMSTART	0
-
 #ifdef CONFIG_NOT_COHERENT_CACHE
 #define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 #endif