
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (38 commits)
  MIPS: Sibyte: Fix locking in set_irq_affinity
  MIPS: Use force_sig when handling address errors.
  MIPS: Cavium: Add struct clocksource * argument to octeon_cvmcount_read()
  MIPS: Rewrite <asm/div64.h> to work with gcc 4.4.0.
  MIPS: Fix highmem.
  MIPS: Fix sign-extension bug in 32-bit kernel on 32-bit hardware.
  MIPS: MSP71xx: Remove the RAMROOT functions
  MIPS: Use -mno-check-zero-division
  MIPS: Set compiler options only after the compiler prefix has been set.
  MIPS: IP27: Get rid of #ident.  Gcc 4.4.0 doesn't like it.
  MIPS: uaccess: Switch lock annotations to might_fault().
  MIPS: MSP71xx: Resolve use of non-existent GPIO routines in msp71xx reset
  MIPS: MSP71xx: Resolve multiple definition of plat_timer_setup
  MIPS: Make uaccess.h slightly more sparse friendly.
  MIPS: Make access_ok() sideeffect proof.
  MIPS: IP27: Fix clash with NMI_OFFSET from hardirq.h
  MIPS: Alchemy: Timer build fix
  MIPS: Kconfig: Delete duplicate definition of RWSEM_GENERIC_SPINLOCK.
  MIPS: Cavium: Add support for 8k and 32k page sizes.
  MIPS: TXx9: Fix possible overflow in clock calculations
  ...
Linus Torvalds 2009-05-14 19:19:43 -07:00
commit c48f2295a9
47 changed files with 318 additions and 338 deletions


@ -1411,13 +1411,12 @@ config PAGE_SIZE_4KB
config PAGE_SIZE_8KB
bool "8kB"
depends on EXPERIMENTAL && CPU_R8000
depends on (EXPERIMENTAL && CPU_R8000) || CPU_CAVIUM_OCTEON
help
Using 8kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available
only on the R8000 processor. Not that at the time of this writing
this option is still high experimental; there are also issues with
compatibility of user applications.
only on R8000 and cnMIPS processors. Note that you will need a
suitable Linux distribution to support this.
config PAGE_SIZE_16KB
bool "16kB"
@ -1428,6 +1427,15 @@ config PAGE_SIZE_16KB
all non-R3000 family processors. Note that you will need a suitable
Linux distribution to support this.
config PAGE_SIZE_32KB
bool "32kB"
depends on CPU_CAVIUM_OCTEON
help
Using 32kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available
only on cnMIPS cores. Note that you will need a suitable Linux
distribution to support this.
config PAGE_SIZE_64KB
bool "64kB"
depends on EXPERIMENTAL && !CPU_R3000 && !CPU_TX39XX
@ -1958,10 +1966,6 @@ config SECCOMP
endmenu
config RWSEM_GENERIC_SPINLOCK
bool
default y
config LOCKDEP_SUPPORT
bool
default y


@ -14,8 +14,6 @@
KBUILD_DEFCONFIG := ip22_defconfig
cflags-y := -ffunction-sections
#
# Select the object file format to substitute into the linker script.
#
@ -50,6 +48,9 @@ ifneq ($(SUBARCH),$(ARCH))
endif
endif
cflags-y := -ffunction-sections
cflags-y += $(call cc-option, -mno-check-zero-division)
ifdef CONFIG_32BIT
ld-emul = $(32bit-emul)
vmlinux-32 = vmlinux


@ -44,7 +44,7 @@
extern int allow_au1k_wait; /* default off for CP0 Counter */
static cycle_t au1x_counter1_read(void)
static cycle_t au1x_counter1_read(struct clocksource *cs)
{
return au_readl(SYS_RTCREAD);
}


@ -38,7 +38,7 @@ void octeon_init_cvmcount(void)
local_irq_restore(flags);
}
static cycle_t octeon_cvmcount_read(void)
static cycle_t octeon_cvmcount_read(struct clocksource *cs)
{
return read_c0_cvmcount();
}
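Both read callbacks above change for the same reason: the clocksource core now passes the clocksource being read to its .read hook, so every driver gains a struct clocksource * parameter even if it never uses it. A minimal sketch of the resulting driver shape, with an illustrative name and a hypothetical read_hw_counter() register accessor standing in for the real hardware access:

#include <linux/clocksource.h>

static cycle_t example_csrc_read(struct clocksource *cs)
{
	/* the new argument can simply be ignored */
	return read_hw_counter();	/* hypothetical register accessor */
}

static struct clocksource example_csrc = {
	.name	= "example",
	.read	= example_csrc_read,
	.mask	= CLOCKSOURCE_MASK(32),
};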


@ -567,7 +567,7 @@ static inline unsigned long __fls(unsigned long word)
int num;
if (BITS_PER_LONG == 32 &&
__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) {
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
" .set mips32 \n"
@ -644,7 +644,7 @@ static inline int fls(int x)
{
int r;
if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r) {
if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__("clz %0, %1" : "=r" (x) : "r" (x));
return 32 - x;


@ -40,7 +40,7 @@ static inline
__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum sum, int *err_ptr)
{
might_sleep();
might_fault();
return __csum_partial_copy_user((__force void *)src, dst,
len, sum, err_ptr);
}
@ -53,7 +53,7 @@ static inline
__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
__wsum sum, int *err_ptr)
{
might_sleep();
might_fault();
if (access_ok(VERIFY_WRITE, dst, len))
return __csum_partial_copy_user(src, (__force void *)dst,
len, sum, err_ptr);
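The checksum helpers above make the same swap that runs through the rest of this series: might_sleep() becomes might_fault(). Assuming the usual definition of might_fault() in this kernel (it still performs the might_sleep() check, and with lockdep enabled it additionally notes that mmap_sem may be taken), the annotation marks exactly the places that dereference user memory. A small hypothetical helper showing the pattern:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static inline int read_user_flag(const int __user *p, int *out)
{
	/* touching user memory can fault, sleep and take mmap_sem */
	might_fault();
	return get_user(*out, p);
}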


@ -3,7 +3,6 @@
/*
* Architecture specific compatibility types
*/
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/types.h>
#include <asm/page.h>


@ -147,6 +147,15 @@
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
cpu_has_mips64r1 | cpu_has_mips64r2)
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
* pre-MIPS32/MIPS64 processors have CLO, CLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
# ifndef cpu_has_clo_clz
# define cpu_has_clo_clz cpu_has_mips_r
# endif
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif


@ -6,105 +6,63 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef _ASM_DIV64_H
#define _ASM_DIV64_H
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
#include <asm-generic/div64.h>
#if BITS_PER_LONG == 64
#include <linux/types.h>
#if (_MIPS_SZLONG == 32)
#include <asm/compiler.h>
/*
* No traps on overflows for any of these...
*/
#define do_div64_32(res, high, low, base) ({ \
unsigned long __quot32, __mod32; \
unsigned long __cf, __tmp, __tmp2, __i; \
\
__asm__(".set push\n\t" \
".set noat\n\t" \
".set noreorder\n\t" \
"move %2, $0\n\t" \
"move %3, $0\n\t" \
"b 1f\n\t" \
" li %4, 0x21\n" \
"0:\n\t" \
"sll $1, %0, 0x1\n\t" \
"srl %3, %0, 0x1f\n\t" \
"or %0, $1, %5\n\t" \
"sll %1, %1, 0x1\n\t" \
"sll %2, %2, 0x1\n" \
"1:\n\t" \
"bnez %3, 2f\n\t" \
" sltu %5, %0, %z6\n\t" \
"bnez %5, 3f\n" \
"2:\n\t" \
" addiu %4, %4, -1\n\t" \
"subu %0, %0, %z6\n\t" \
"addiu %2, %2, 1\n" \
"3:\n\t" \
"bnez %4, 0b\n\t" \
" srl %5, %1, 0x1f\n\t" \
".set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
: "Jr" (base), "0" (high), "1" (low)); \
\
(res) = __quot32; \
__mod32; })
#define __div64_32(n, base) \
({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
unsigned long __high, __low; \
unsigned long long __n; \
\
__high = *__n >> 32; \
__low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
" .set noreorder \n" \
" move %2, $0 \n" \
" move %3, $0 \n" \
" b 1f \n" \
" li %4, 0x21 \n" \
"0: \n" \
" sll $1, %0, 0x1 \n" \
" srl %3, %0, 0x1f \n" \
" or %0, $1, %5 \n" \
" sll %1, %1, 0x1 \n" \
" sll %2, %2, 0x1 \n" \
"1: \n" \
" bnez %3, 2f \n" \
" sltu %5, %0, %z6 \n" \
" bnez %5, 3f \n" \
"2: \n" \
" addiu %4, %4, -1 \n" \
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
" bnez %4, 0b\n\t" \
" srl %5, %1, 0x1f\n\t" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
: "Jr" (base), "0" (__high), "1" (__low)); \
\
(__n) = __quot32; \
__mod32; \
})
#define do_div(n, base) ({ \
unsigned long long __quot; \
unsigned long __mod; \
unsigned long long __div; \
unsigned long __upper, __low, __high, __base; \
\
__div = (n); \
__base = (base); \
\
__high = __div >> 32; \
__low = __div; \
__upper = __high; \
\
if (__high) \
__asm__("divu $0, %z2, %z3" \
: "=h" (__upper), "=l" (__high) \
: "Jr" (__high), "Jr" (__base) \
: GCC_REG_ACCUM); \
\
__mod = do_div64_32(__low, __upper, __low, __base); \
\
__quot = __high; \
__quot = __quot << 32 | __low; \
(n) = __quot; \
__mod; })
#endif /* BITS_PER_LONG == 64 */
#endif /* (_MIPS_SZLONG == 32) */
#if (_MIPS_SZLONG == 64)
/*
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n, base) ({ \
unsigned long __quot; \
unsigned int __mod; \
unsigned long __div; \
unsigned int __base; \
\
__div = (n); \
__base = (base); \
\
__mod = __div % __base; \
__quot = __div / __base; \
\
(n) = __quot; \
__mod; })
#endif /* (_MIPS_SZLONG == 64) */
#endif /* _ASM_DIV64_H */
#endif /* __ASM_DIV64_H */
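The rewritten header keeps the long-standing do_div() calling convention: the macro divides a 64-bit lvalue in place by a 32-bit base and evaluates to the remainder. A minimal sketch of a caller, with a hypothetical helper name:

#include <asm/div64.h>

static inline unsigned long ns_to_ms_example(unsigned long long ns)
{
	unsigned int rem;

	rem = do_div(ns, 1000000);	/* ns now holds the quotient */
	(void)rem;			/* remainder unused here */
	return (unsigned long)ns;
}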


@ -24,8 +24,13 @@ extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction);
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction)
{
dma_unmap_single(dev, dma_address, size, direction);
}
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction);
extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,


@ -108,6 +108,9 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
return __virt_to_fix(vaddr);
}
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
/*
* Called from pgtable_init()
*/


@ -138,8 +138,9 @@ do { \
__instruction_hazard(); \
} while (0)
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
defined(CONFIG_CPU_R5500) || defined(CONFIG_MACH_ALCHEMY)
#elif defined(CONFIG_MACH_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
defined(CONFIG_CPU_R5500)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.


@ -30,8 +30,6 @@
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
@ -62,6 +60,10 @@ extern struct page *__kmap_atomic_to_page(void *ptr);
#define flush_cache_kmaps() flush_cache_all()
extern void kmap_init(void);
#define kmap_prot PAGE_KERNEL
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */


@ -715,7 +715,7 @@ enum soc_au1500_ints {
#ifdef CONFIG_SOC_AU1100
enum soc_au1100_ints {
AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
AU1100_UART0_INT,
AU1100_UART0_INT = AU1100_FIRST_INT,
AU1100_UART1_INT,
AU1100_SD_INT,
AU1100_UART3_INT,
@ -902,8 +902,8 @@ enum soc_au1200_ints {
AU1000_RTC_MATCH0_INT,
AU1000_RTC_MATCH1_INT,
AU1000_RTC_MATCH2_INT,
AU1200_NAND_INT = AU1200_FIRST_INT + 23,
AU1200_GPIO_203,
AU1200_NAND_INT,
AU1200_GPIO_204,
AU1200_GPIO_205,
AU1200_GPIO_206,


@ -46,20 +46,6 @@
#define CONFIG_BLK_DEV_IDE_AU1XXX_BURSTABLE_ON 0
#endif
#ifdef CONFIG_PM
/*
* This will enable the device to be powered up when write() or read()
* is called. If this is not defined, the driver will return -EBUSY.
*/
#define WAKE_ON_ACCESS 1
typedef struct {
spinlock_t lock; /* Used to block on state transitions */
au1xxx_power_dev_t *dev; /* Power Managers device structure */
unsigned stopped; /* Used to signal device is stopped */
} pm_state;
#endif
typedef struct {
u32 tx_dev_id, rx_dev_id, target_dev_id;
u32 tx_chan, rx_chan;
@ -72,9 +58,6 @@ typedef struct {
#endif
int irq;
u32 regbase;
#ifdef CONFIG_PM
pm_state pm;
#endif
} _auide_hwif;
/******************************************************************************/


@ -0,0 +1,59 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2009 Wu Zhangjin <wuzj@lemote.com>
* Copyright (C) 2009 Philippe Vachon <philippe@cowpig.ca>
* Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
*
* reference: /proc/cpuinfo,
* arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
* arch/mips/kernel/proc.c(show_cpuinfo),
* loongson2f user manual.
*/
#ifndef __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 32
#define cpu_has_32fpr 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_4kex 1
#define cpu_has_64bits 1
#define cpu_has_cache_cdex_p 0
#define cpu_has_cache_cdex_s 0
#define cpu_has_counter 1
#define cpu_has_dc_aliases 1
#define cpu_has_divec 0
#define cpu_has_dsp 0
#define cpu_has_ejtag 0
#define cpu_has_fpu 1
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
#define cpu_has_mips32r1 0
#define cpu_has_mips32r2 0
#define cpu_has_mips3d 0
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
#define cpu_has_prefetch 0
#define cpu_has_smartmips 0
#define cpu_has_tlb 1
#define cpu_has_tx39_cache 0
#define cpu_has_userlocal 0
#define cpu_has_vce 0
#define cpu_has_vtag_icache 0
#define cpu_has_watch 1
#define cpu_icache_snoops_remote_store 1
#endif /* __ASM_MACH_LEMOTE_CPU_FEATURE_OVERRIDES_H */


@ -184,12 +184,19 @@
#else
#define PM_4K 0x00000000
#define PM_8K 0x00002000
#define PM_16K 0x00006000
#define PM_32K 0x0000e000
#define PM_64K 0x0001e000
#define PM_128K 0x0003e000
#define PM_256K 0x0007e000
#define PM_512K 0x000fe000
#define PM_1M 0x001fe000
#define PM_2M 0x003fe000
#define PM_4M 0x007fe000
#define PM_8M 0x00ffe000
#define PM_16M 0x01ffe000
#define PM_32M 0x03ffe000
#define PM_64M 0x07ffe000
#define PM_256M 0x1fffe000
#define PM_1G 0x7fffe000
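The added constants follow the same PageMask encoding as the values that were already here: each mask is ((page size / 4K) - 1) shifted left by 13 bits. Spelling a few of the new ones out:

PM_8K   = (8K/4K   - 1) << 13 = 0x01 << 13 = 0x00002000
PM_32K  = (32K/4K  - 1) << 13 = 0x07 << 13 = 0x0000e000
PM_512K = (512K/4K - 1) << 13 = 0x7f << 13 = 0x000fe000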
@ -201,8 +208,12 @@
*/
#ifdef CONFIG_PAGE_SIZE_4KB
#define PM_DEFAULT_MASK PM_4K
#elif defined(CONFIG_PAGE_SIZE_8KB)
#define PM_DEFAULT_MASK PM_8K
#elif defined(CONFIG_PAGE_SIZE_16KB)
#define PM_DEFAULT_MASK PM_16K
#elif defined(CONFIG_PAGE_SIZE_32KB)
#define PM_DEFAULT_MASK PM_32K
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PM_DEFAULT_MASK PM_64K
#else
@ -717,8 +728,8 @@ do { \
".set\tmips64\n\t" \
"dmfc0\t%M0, " #source "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsrl\t%M0, %M0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0" \
: "=r" (__val)); \
else \
@ -726,8 +737,8 @@ do { \
".set\tmips64\n\t" \
"dmfc0\t%M0, " #source ", " #sel "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsrl\t%M0, %M0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0" \
: "=r" (__val)); \
local_irq_restore(__flags); \
@ -1484,14 +1495,15 @@ static inline unsigned int \
set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
res |= set; \
write_c0_##name(res); \
new = res | set; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
@ -1502,14 +1514,15 @@ static inline unsigned int \
clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
res &= ~clear; \
write_c0_##name(res); \
new = res & ~clear; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
@ -1517,9 +1530,10 @@ clear_c0_##name(unsigned int clear) \
} \
\
static inline unsigned int \
change_c0_##name(unsigned int change, unsigned int new) \
change_c0_##name(unsigned int change, unsigned int newbits) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
@ -1527,9 +1541,9 @@ change_c0_##name(unsigned int change, unsigned int new) \
\
omt = __dmt(); \
res = read_c0_##name(); \
res &= ~change; \
res |= (new & change); \
write_c0_##name(res); \
new = res & ~change; \
new |= (newbits & change); \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\


@ -23,6 +23,9 @@
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT 15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#endif
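As with the existing entries, the page size follows directly from the shift: PAGE_SIZE = 1 << PAGE_SHIFT, so the new value of 15 gives 1 << 15 = 32768 bytes = 32 kB, matching the new Kconfig option.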


@ -83,6 +83,12 @@
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud


@ -359,11 +359,11 @@
TO_NODE_UNCAC((nasid), LAUNCH_OFFSET(nasid, slice))
#define LAUNCH_SIZE(nasid) KLD_LAUNCH(nasid)->size
#define NMI_OFFSET(nasid, slice) \
#define SN_NMI_OFFSET(nasid, slice) \
(KLD_NMI(nasid)->offset + \
KLD_NMI(nasid)->stride * (slice))
#define NMI_ADDR(nasid, slice) \
TO_NODE_UNCAC((nasid), NMI_OFFSET(nasid, slice))
TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice))
#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
#define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset


@ -3,13 +3,13 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Derived from IRIX <sys/SN/nmi.h>, Revision 1.5.
*
* Copyright (C) 1992 - 1997 Silicon Graphics, Inc.
*/
#ifndef __ASM_SN_NMI_H
#define __ASM_SN_NMI_H
#ident "$Revision: 1.5 $"
#include <asm/sn/addrs.h>
/*


@ -75,6 +75,9 @@ register struct thread_info *__current_thread_info __asm__("$28");
#ifdef CONFIG_PAGE_SIZE_16KB
#define THREAD_SIZE_ORDER (0)
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define THREAD_SIZE_ORDER (0)
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define THREAD_SIZE_ORDER (0)
#endif


@ -57,7 +57,11 @@ extern int r4k_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
#ifdef CONFIG_CEVT_R4K
#ifdef CONFIG_MIPS_MT_SMTC
extern int smtc_clockevent_init(void);
return smtc_clockevent_init();
#elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init();
#else
return -ENXIO;


@ -105,10 +105,20 @@
#define __access_mask get_fs().seg
#define __access_ok(addr, size, mask) \
(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)
({ \
unsigned long __addr = (unsigned long) (addr); \
unsigned long __size = size; \
unsigned long __mask = mask; \
unsigned long __ok; \
\
__chk_user_ptr(addr); \
__ok = (signed long)(__mask & (__addr | (__addr + __size) | \
__ua_size(__size))); \
__ok == 0; \
})
#define access_ok(type, addr, size) \
likely(__access_ok((unsigned long)(addr), (size), __access_mask))
likely(__access_ok((addr), (size), __access_mask))
/*
* put_user: - Write a simple value into user space.
@ -225,6 +235,7 @@ do { \
({ \
int __gu_err; \
\
__chk_user_ptr(ptr); \
__get_user_common((x), size, ptr); \
__gu_err; \
})
@ -234,6 +245,7 @@ do { \
int __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
might_fault(); \
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
\
@ -305,6 +317,7 @@ do { \
__typeof__(*(ptr)) __pu_val; \
int __pu_err = 0; \
\
__chk_user_ptr(ptr); \
__pu_val = (x); \
switch (size) { \
case 1: __put_user_asm("sb", ptr); break; \
@ -322,6 +335,7 @@ do { \
__typeof__(*(ptr)) __pu_val = (x); \
int __pu_err = -EFAULT; \
\
might_fault(); \
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
switch (size) { \
case 1: __put_user_asm("sb", __pu_addr); break; \
@ -696,10 +710,10 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
const void *__cu_from; \
long __cu_len; \
\
might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@ -752,13 +766,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void *__cu_from; \
long __cu_len; \
\
might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
@ -831,10 +846,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
@ -862,17 +877,31 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
#define __copy_in_user(to, from, n) __copy_from_user(to, from, n)
#define __copy_in_user(to, from, n) \
({ \
void __user *__cu_to; \
const void __user *__cu_from; \
long __cu_len; \
\
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
__cu_len; \
})
#define copy_in_user(to, from, n) \
({ \
@ -880,14 +909,15 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
const void __user *__cu_from; \
long __cu_len; \
\
might_sleep(); \
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
access_ok(VERIFY_WRITE, __cu_to, __cu_len))) \
access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
__cu_len); \
} \
__cu_len; \
})
@ -907,7 +937,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
@ -956,7 +986,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@ -993,7 +1023,7 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@ -1012,7 +1042,7 @@ static inline long __strlen_user(const char __user *s)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_nocheck_asm)
@ -1042,7 +1072,7 @@ static inline long strlen_user(const char __user *s)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
__MODULE_JAL(__strlen_user_asm)
@ -1059,7 +1089,7 @@ static inline long __strnlen_user(const char __user *s, long n)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
@ -1090,7 +1120,7 @@ static inline long strnlen_user(const char __user *s, long n)
{
long res;
might_sleep();
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"


@ -245,7 +245,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
}
int __cpuinit mips_clockevent_init(void)
int __cpuinit smtc_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();


@ -405,8 +405,8 @@ EXPORT(sysn32_call_table)
PTR sys_eventfd
PTR sys_fallocate
PTR sys_timerfd_create
PTR sys_timerfd_gettime /* 5285 */
PTR sys_timerfd_settime
PTR compat_sys_timerfd_gettime /* 5285 */
PTR compat_sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1


@ -525,8 +525,8 @@ sys_call_table:
PTR sys_eventfd
PTR sys32_fallocate /* 4320 */
PTR sys_timerfd_create
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
PTR compat_sys_timerfd_gettime
PTR compat_sys_timerfd_settime
PTR compat_sys_signalfd4
PTR sys_eventfd2 /* 4325 */
PTR sys_epoll_create1


@ -482,19 +482,19 @@ fault:
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
send_sig(SIGSEGV, current, 1);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
send_sig(SIGBUS, current, 1);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
send_sig(SIGILL, current, 1);
force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)


@ -19,6 +19,15 @@ static inline const char *msk2str(unsigned int mask)
case PM_16K: return "16kb";
case PM_64K: return "64kb";
case PM_256K: return "256kb";
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case PM_8K: return "8kb";
case PM_32K: return "32kb";
case PM_128K: return "128kb";
case PM_512K: return "512kb";
case PM_2M: return "2Mb";
case PM_8M: return "8Mb";
case PM_32M: return "32Mb";
#endif
#ifndef CONFIG_CPU_VR41XX
case PM_1M: return "1Mb";
case PM_4M: return "4Mb";


@ -1041,7 +1041,7 @@ static void __cpuinit probe_pcache(void)
printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
icache_size >> 10,
cpu_has_vtag_icache ? "VIVT" : "VIPT",
c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
way_string[c->icache.ways], c->icache.linesz);
printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",


@ -209,7 +209,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long addr;
addr = (unsigned long) page_address(page) + offset;
dma_cache_wback_inv(addr, size);
__dma_sync(addr, size, direction);
}
return plat_map_dma_mem_page(dev, page) + offset;
@ -217,23 +217,6 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
EXPORT_SYMBOL(dma_map_page);
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
unsigned long addr;
addr = dma_addr_to_virt(dma_address);
dma_cache_wback_inv(addr, size);
}
plat_unmap_dma_mem(dev, dma_address);
}
EXPORT_SYMBOL(dma_unmap_page);
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{


@ -1,7 +1,12 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
void *__kmap(struct page *page)
{
void *addr;
@ -14,6 +19,7 @@ void *__kmap(struct page *page)
return addr;
}
EXPORT_SYMBOL(__kmap);
void __kunmap(struct page *page)
{
@ -22,6 +28,7 @@ void __kunmap(struct page *page)
return;
kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@ -48,11 +55,12 @@ void *__kmap_atomic(struct page *page, enum km_type type)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
local_flush_tlb_one((unsigned long)vaddr);
return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
@ -77,6 +85,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
@ -92,7 +101,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
flush_tlb_one(vaddr);
return (void*) vaddr;
@ -111,7 +120,11 @@ struct page *__kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
void __init kmap_init(void)
{
unsigned long kmap_vstart;
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
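For context, the interface this file now fully owns (kmap_pte has become local and kmap_prot is a plain PAGE_KERNEL define in the header) is the usual atomic-kmap pattern. A generic usage sketch under the km_type API of this kernel generation; the helper and its arguments are illustrative only:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_out_of_page(struct page *page, void *buffer, size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(buffer, vaddr, len);
	kunmap_atomic(vaddr, KM_USER0);
}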


@ -104,14 +104,6 @@ unsigned long setup_zero_pages(void)
return 1UL << order;
}
/*
* These are almost like kmap_atomic / kunmap_atmic except they take an
* additional address argument as the hint.
*/
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
@ -264,24 +256,6 @@ void copy_from_user_page(struct vm_area_struct *vma,
}
}
#ifdef CONFIG_HIGHMEM
unsigned long highstart_pfn, highend_pfn;
pte_t *kmap_pte;
pgprot_t kmap_prot;
static void __init kmap_init(void)
{
unsigned long kmap_vstart;
/* cache the first kmap pte */
kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{


@ -29,7 +29,7 @@ extern unsigned long icache_way_size, dcache_way_size;
#include <asm/r4kcache.h>
int rm7k_tcache_enabled;
static int rm7k_tcache_enabled;
/*
* Writeback and invalidate the primary cache dcache before DMA.
@ -121,7 +121,7 @@ static void rm7k_sc_disable(void)
clear_c0_config(RM7K_CONF_SE);
}
struct bcache_ops rm7k_sc_ops = {
static struct bcache_ops rm7k_sc_ops = {
.bc_enable = rm7k_sc_enable,
.bc_disable = rm7k_sc_disable,
.bc_wback_inv = rm7k_sc_wback_inv,


@ -36,18 +36,6 @@ config PMC_MSP7120_FPGA
endchoice
menu "Options for PMC-Sierra MSP chipsets"
depends on PMC_MSP
config PMC_MSP_EMBEDDED_ROOTFS
bool "Root filesystem embedded in kernel image"
select MTD
select MTD_BLOCK
select MTD_PMC_MSP_RAMROOT
select MTD_RAM
endmenu
config HYPERTRANSPORT
bool "Hypertransport Support for PMC-Sierra Yosemite"
depends on PMC_YOSEMITE


@ -40,12 +40,6 @@
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#ifdef CONFIG_CRAMFS
#include <linux/cramfs_fs.h>
#endif
#ifdef CONFIG_SQUASHFS
#include <linux/squashfs_fs.h>
#endif
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@ -435,10 +429,6 @@ struct prom_pmemblock *__init prom_getmdesc(void)
char *str;
unsigned int memsize;
unsigned int heaptop;
#ifdef CONFIG_MTD_PMC_MSP_RAMROOT
void *ramroot_start;
unsigned long ramroot_size;
#endif
int i;
str = prom_getenv(memsz_env);
@ -506,19 +496,7 @@ struct prom_pmemblock *__init prom_getmdesc(void)
i++; /* 3 */
mdesc[i].type = BOOT_MEM_RESERVED;
mdesc[i].base = CPHYSADDR((u32)_text);
#ifdef CONFIG_MTD_PMC_MSP_RAMROOT
if (get_ramroot(&ramroot_start, &ramroot_size)) {
/*
* Rootfs in RAM -- follows kernel
* Combine rootfs image with kernel block so a
* page (4k) isn't wasted between memory blocks
*/
mdesc[i].size = CPHYSADDR(PAGE_ALIGN(
(u32)ramroot_start + ramroot_size)) - mdesc[i].base;
} else
#endif
mdesc[i].size = CPHYSADDR(PAGE_ALIGN(
(u32)_end)) - mdesc[i].base;
mdesc[i].size = CPHYSADDR(PAGE_ALIGN((u32)_end)) - mdesc[i].base;
/* Remainder of RAM -- under memsize */
i++; /* 5 */
@ -528,39 +506,3 @@ struct prom_pmemblock *__init prom_getmdesc(void)
return &mdesc[0];
}
/* rootfs functions */
#ifdef CONFIG_MTD_PMC_MSP_RAMROOT
bool get_ramroot(void **start, unsigned long *size)
{
extern char _end[];
/* Check for start following the end of the kernel */
void *check_start = (void *)_end;
/* Check for supported rootfs types */
#ifdef CONFIG_CRAMFS
if (*(__u32 *)check_start == CRAMFS_MAGIC) {
/* Get CRAMFS size */
*start = check_start;
*size = PAGE_ALIGN(((struct cramfs_super *)
check_start)->size);
return true;
}
#endif
#ifdef CONFIG_SQUASHFS
if (*((unsigned int *)check_start) == SQUASHFS_MAGIC) {
/* Get SQUASHFS size */
*start = check_start;
*size = PAGE_ALIGN(((struct squashfs_super_block *)
check_start)->bytes_used);
return true;
}
#endif
return false;
}
EXPORT_SYMBOL(get_ramroot);
#endif


@ -21,7 +21,6 @@
#if defined(CONFIG_PMC_MSP7120_GW)
#include <msp_regops.h>
#include <msp_gpio.h>
#define MSP_BOARD_RESET_GPIO 9
#endif
@ -88,11 +87,8 @@ void msp7120_reset(void)
* as GPIO char driver may not be enabled and it would look up
* data inRAM!
*/
set_value_reg32(GPIO_CFG3_REG,
basic_mode_mask(MSP_BOARD_RESET_GPIO),
basic_mode(MSP_GPIO_OUTPUT, MSP_BOARD_RESET_GPIO));
set_reg32(GPIO_DATA3_REG,
basic_data_mask(MSP_BOARD_RESET_GPIO));
set_value_reg32(GPIO_CFG3_REG, 0xf000, 0x8000);
set_reg32(GPIO_DATA3_REG, 8);
/*
* In case GPIO9 doesn't reset the board (jumper configurable!)


@ -81,10 +81,7 @@ void __init plat_time_init(void)
mips_hpt_frequency = cpu_rate/2;
}
void __init plat_timer_setup(struct irqaction *irq)
unsigned int __init get_c0_compare_int(void)
{
#ifdef CONFIG_IRQ_MSP_CIC
/* we are using the vpe0 counter for timer interrupts */
setup_irq(MSP_INT_VPE0_TIMER, irq);
#endif
return MSP_INT_VPE0_TIMER;
}


@ -16,7 +16,7 @@
#include <asm/ptrace.h>
#include <asm/tlbdebug.h>
int ip32_be_handler(struct pt_regs *regs, int is_fixup)
static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
{
int data = regs->cp0_cause & 4;


@ -112,13 +112,13 @@ static void inline flush_mace_bus(void)
extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
struct irqaction memerr_irq = {
static struct irqaction memerr_irq = {
.handler = crime_memerr_intr,
.flags = IRQF_DISABLED,
.name = "CRIME memory error",
};
struct irqaction cpuerr_irq = {
static struct irqaction cpuerr_irq = {
.handler = crime_cpuerr_intr,
.flags = IRQF_DISABLED,
.name = "CRIME CPU error",


@ -113,7 +113,6 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
{
int i = 0, old_cpu, cpu, int_on, k;
u64 cur_ints;
struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
unsigned int irq_dirty;
@ -127,8 +126,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
cpu = cpu_logical_map(i);
/* Protect against other affinity changers and IMR manipulation */
spin_lock_irqsave(&desc->lock, flags);
spin_lock(&bcm1480_imr_lock);
spin_lock_irqsave(&bcm1480_imr_lock, flags);
/* Swizzle each CPU's IMR (but leave the IP selection alone) */
old_cpu = bcm1480_irq_owner[irq];
@ -153,8 +151,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
____raw_writeq(cur_ints, IOADDR(A_BCM1480_IMR_MAPPER(cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
}
}
spin_unlock(&bcm1480_imr_lock);
spin_unlock_irqrestore(&desc->lock, flags);
spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
}
#endif


@ -107,7 +107,6 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
{
int i = 0, old_cpu, cpu, int_on;
u64 cur_ints;
struct irq_desc *desc = irq_desc + irq;
unsigned long flags;
i = cpumask_first(mask);
@ -121,8 +120,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
cpu = cpu_logical_map(i);
/* Protect against other affinity changers and IMR manipulation */
spin_lock_irqsave(&desc->lock, flags);
spin_lock(&sb1250_imr_lock);
spin_lock_irqsave(&sb1250_imr_lock, flags);
/* Swizzle each CPU's IMR (but leave the IP selection alone) */
old_cpu = sb1250_irq_owner[irq];
@ -144,8 +142,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
____raw_writeq(cur_ints, IOADDR(A_IMR_MAPPER(cpu) +
R_IMR_INTERRUPT_MASK));
}
spin_unlock(&sb1250_imr_lock);
spin_unlock_irqrestore(&desc->lock, flags);
spin_unlock_irqrestore(&sb1250_imr_lock, flags);
}
#endif
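The locking change in the two Sibyte interrupt controllers above (bcm1480 and sb1250) reflects how the generic IRQ layer calls into a chip driver: the core already holds the descriptor lock when it invokes the set_affinity hook, so re-taking desc->lock here could deadlock, and the handlers now only guard their own IMR state with the interrupt-disabling form of their private lock. A rough, simplified sketch of that calling context (not the actual core code):

#include <linux/irq.h>
#include <linux/spinlock.h>

static int irq_set_affinity_sketch(struct irq_desc *desc, unsigned int irq,
				   const struct cpumask *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->chip->set_affinity(irq, mask);	/* e.g. sb1250_set_affinity() */
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}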


@ -88,7 +88,7 @@ void __init tx4927_setup(void)
{
int i;
__u32 divmode;
int cpuclk = 0;
unsigned int cpuclk = 0;
u64 ccfg;
txx9_reg_res_init(TX4927_REV_PCODE(), TX4927_REG_BASE,


@ -93,7 +93,7 @@ void __init tx4938_setup(void)
{
int i;
__u32 divmode;
int cpuclk = 0;
unsigned int cpuclk = 0;
u64 ccfg;
txx9_reg_res_init(TX4938_REV_PCODE(), TX4938_REG_BASE,


@ -114,7 +114,7 @@ void __init tx4939_setup(void)
int i;
__u32 divmode;
__u64 pcfg;
int cpuclk = 0;
unsigned int cpuclk = 0;
txx9_reg_res_init(TX4939_REV_PCODE(), TX4939_REG_BASE,
TX4939_REG_SIZE);


@ -536,7 +536,7 @@ static void __init rbtx4939_setup(void)
}
struct txx9_board_vec rbtx4939_vec __initdata = {
.system = "Tothiba RBTX4939",
.system = "Toshiba RBTX4939",
.prom_init = rbtx4939_prom_init,
.mem_setup = rbtx4939_setup,
.irq_setup = rbtx4939_irq_setup,


@ -75,7 +75,7 @@ struct gbefb_par {
static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024;
static void *gbe_mem;
static dma_addr_t gbe_dma_addr;
unsigned long gbe_mem_phys;
static unsigned long gbe_mem_phys;
static struct {
uint16_t *cpu;
@ -185,8 +185,8 @@ static struct fb_videomode default_mode_LCD __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
struct fb_videomode *default_mode __initdata = &default_mode_CRT;
struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
static struct fb_videomode *default_mode __initdata = &default_mode_CRT;
static struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
static int flat_panel_enabled = 0;
@ -205,7 +205,7 @@ static void gbe_reset(void)
* console.
*/
void gbe_turn_off(void)
static void gbe_turn_off(void)
{
int i;
unsigned int val, x, y, vpixen_off;
@ -1097,7 +1097,7 @@ static void gbefb_create_sysfs(struct device *dev)
* Initialization
*/
int __init gbefb_setup(char *options)
static int __init gbefb_setup(char *options)
{
char *this_opt;
@ -1283,7 +1283,7 @@ static struct platform_driver gbefb_driver = {
static struct platform_device *gbefb_device;
int __init gbefb_init(void)
static int __init gbefb_init(void)
{
int ret = platform_driver_register(&gbefb_driver);
if (!ret) {
@ -1301,7 +1301,7 @@ int __init gbefb_init(void)
return ret;
}
void __exit gbefb_exit(void)
static void __exit gbefb_exit(void)
{
platform_device_unregister(gbefb_device);
platform_driver_unregister(&gbefb_driver);