Merge branches 'from-henrik', 'hidraw', 'logitech', 'picolcd', 'ps3', 'uclogic', 'wacom' and 'wiimote' into for-linus

Jiri Kosina 2012-10-01 14:36:26 +02:00
545 changed files with 8151 additions and 5145 deletions

View File

@@ -1,3 +1,16 @@
+What:		/sys/class/hidraw/hidraw*/device/oled*_img
+Date:		June 2012
+Contact:	linux-bluetooth@vger.kernel.org
+Description:
+		The /sys/class/hidraw/hidraw*/device/oled*_img files control
+		OLED micro displays on the Intuos4 Wireless tablet. An accepted
+		image has to contain 256 bytes (64x32 px, 1-bit colour). The
+		format is the same as a PBM image 64x32 px without header (64
+		bits per horizontal line, 32 lines). Example for OLED No. 0:
+		dd bs=256 count=1 if=img_file of=[path to oled0_img]/oled0_img
+		The attribute is write only and no local copy of the image is
+		stored.
+
 What:		/sys/class/hidraw/hidraw*/device/speed
 Date:		April 2010
 Kernel Version:	2.6.35
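
A minimal user-space sketch of the interface described above, equivalent to the dd invocation; the hidraw index in the path is illustrative and depends on device enumeration:

/* Push a 64x32 px, 1-bit image (256 bytes) to OLED No. 0. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char img[256];		/* 64 bits per line x 32 lines */
	int fd, i;

	memset(img, 0, sizeof(img));
	for (i = 0; i < 8; i++)		/* light the top pixel row */
		img[i] = 0xff;

	fd = open("/sys/class/hidraw/hidraw0/device/oled0_img", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, img, sizeof(img)) != sizeof(img)) {
		perror("write");
		return 1;
	}
	return close(fd) ? 1 : 0;
}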

View File

@@ -21,6 +21,7 @@ Supported adapters:
   * Intel DH89xxCC (PCH)
   * Intel Panther Point (PCH)
   * Intel Lynx Point (PCH)
+  * Intel Lynx Point-LP (PCH)
    Datasheets: Publicly available at the Intel website

 On Intel Patsburg and later chipsets, both the normal host SMBus controller

View File

@@ -3388,7 +3388,7 @@ M:	"Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:	linux-i2c@vger.kernel.org
 W:	http://i2c.wiki.kernel.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
-T:	git git://git.fluff.org/bjdooks/linux.git
+T:	git git://git.pengutronix.de/git/wsa/linux.git
 S:	Maintained
 F:	Documentation/i2c/
 F:	drivers/i2c/
@@ -3666,11 +3666,12 @@ F:	Documentation/networking/README.ipw2200
 F:	drivers/net/wireless/ipw2x00/

 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
-M:	Joseph Cihula <joseph.cihula@intel.com>
+M:	Richard L Maliszewski <richard.l.maliszewski@intel.com>
+M:	Gang Wei <gang.wei@intel.com>
 M:	Shane Wang <shane.wang@intel.com>
 L:	tboot-devel@lists.sourceforge.net
 W:	http://tboot.sourceforge.net
-T:	Mercurial http://www.bughost.org/repos.hg/tboot.hg
+T:	hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
 S:	Supported
 F:	Documentation/intel_txt.txt
 F:	include/linux/tboot.h
@@ -5320,6 +5321,12 @@ L:	linux-mtd@lists.infradead.org
 S:	Maintained
 F:	drivers/mtd/devices/phram.c

+PICOLCD HID DRIVER
+M:	Bruno Prémont <bonbons@linux-vserver.org>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/hid/hid-picolcd*
+
 PICOXCELL SUPPORT
 M:	Jamie Iles <jamie@jamieiles.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

View File

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel

 # *DOCUMENTATION*

View File

@@ -6,7 +6,7 @@ config ARM
	select HAVE_DMA_API_DEBUG
	select HAVE_IDE if PCI || ISA || PCMCIA
	select HAVE_DMA_ATTRS
-	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select HAVE_DMA_CONTIGUOUS if MMU
	select HAVE_MEMBLOCK
	select RTC_LIB
	select SYS_SUPPORTS_APM_EMULATION

View File

@@ -356,15 +356,15 @@ choice
	  is nothing connected to read from the DCC.

	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
		help
		  Semihosting enables code running on an ARM target to use
		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
		  semihosting enabled for the special svc call to be trapped
		  otherwise the kernel will crash.

-		  This is known to work with OpenOCD, as wellas
+		  This is known to work with OpenOCD, as well as
		  ARM's Fast Models, or any other controlling environment
		  that implements semihosting.
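
The help text above boils down to a single host-trapped SVC; a sketch of the call convention, assuming ARM state (Thumb state uses svc 0xab) and the standard SYS_WRITE0 (0x04) operation:

/* Print a NUL-terminated string through the host debugger's console.
 * r0 carries the semihosting operation number, r1 its argument; the
 * immediate 0x123456 marks the SVC as a semihosting request.
 */
static inline void semihost_write0(const char *s)
{
	register unsigned long r0 asm("r0") = 0x04;	/* SYS_WRITE0 */
	register const char *r1 asm("r1") = s;

	asm volatile("svc 0x123456"
		     : "+r" (r0)
		     : "r" (r1)
		     : "memory", "cc");
}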

View File

@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@

-%.dtb:
+%.dtb: scripts
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

-dtbs:
+dtbs: scripts
	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@

 # We use MRPROPER_FILES and CLEAN_FILES now

View File

@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
+		bic	r6, r6, #1 << 31	@ 32-bit translation system
+		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
 #endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register

View File

@@ -104,6 +104,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioB: gpio@fffff600 {
@@ -113,6 +114,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioC: gpio@fffff800 {
@@ -122,6 +124,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			dbgu: serial@fffff200 {

View File

@@ -95,6 +95,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioB: gpio@fffff400 {
@@ -104,6 +105,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioC: gpio@fffff600 {
@@ -113,6 +115,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioD: gpio@fffff800 {
@@ -122,6 +125,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioE: gpio@fffffa00 {
@@ -131,6 +135,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			dbgu: serial@ffffee00 {

View File

@@ -113,6 +113,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioB: gpio@fffff400 {
@@ -122,6 +123,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioC: gpio@fffff600 {
@@ -131,6 +133,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioD: gpio@fffff800 {
@@ -140,6 +143,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioE: gpio@fffffa00 {
@@ -149,6 +153,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			dbgu: serial@ffffee00 {

View File

@@ -107,6 +107,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioB: gpio@fffff600 {
@@ -116,6 +117,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioC: gpio@fffff800 {
@@ -125,6 +127,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioD: gpio@fffffa00 {
@@ -134,6 +137,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			dbgu: serial@fffff200 {

View File

@@ -115,6 +115,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioB: gpio@fffff600 {
@@ -124,6 +125,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioC: gpio@fffff800 {
@@ -133,6 +135,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			pioD: gpio@fffffa00 {
@@ -142,6 +145,7 @@
				#gpio-cells = <2>;
				gpio-controller;
				interrupt-controller;
+				#interrupt-cells = <2>;
			};

			dbgu: serial@fffff200 {

View File

@@ -320,4 +320,12 @@
	.size \name , . - \name
	.endm

+	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
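
In C terms, the macro rejects an access whose last byte wraps around the address space or lands above the passed limit (the callers pass addr_limit - 1); a sketch:

/* Equivalent of check_uaccess: nonzero means take the fault branch. */
static inline int check_uaccess_c(unsigned long addr, unsigned long size,
				  unsigned long limit)
{
	unsigned long last = addr + size - 1;

	return (last < addr) || (last > limit);
}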

View File

@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }

+/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
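
The Kirkwood hunk later in this series is the in-tree caller; a sketch of the intended use, with an illustrative hook name:

/* Platforms that do atomic coherent allocations call this from their
 * ->init_early hook, i.e. before postcore_initcall runs.
 */
static void __init example_board_init_early(void)
{
	init_dma_coherent_pool_size(SZ_1M);	/* default is 256KiB */
}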

View File

@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */

 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif

+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.

View File

@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
	pgtable_page_dtor(pte);

+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
+#endif

	tlb_remove_page(tlb, pte);
 }

View File

@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);

-#define __get_user_x(__r2,__p,__e,__s,__i...) \
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
	   __asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
+		__asmeq("%3", "r1") \
		"bl	__get_user_" #__s \
		: "=&r" (__e), "=r" (__r2) \
-		: "0" (__p) \
-		: __i, "cc")
+		: "0" (__p), "r" (__l) \
+		: __GUP_CLOBBER_##__s)

-#define get_user(x,p) \
+#define __get_user_check(x,p) \
	({ \
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register unsigned long __r2 asm("r2"); \
+		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		switch (sizeof(*(__p))) { \
		case 1: \
-			__get_user_x(__r2, __p, __e, 1, "lr"); \
+			__get_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr"); \
+			__get_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
-			__get_user_x(__r2, __p, __e, 4, "lr"); \
+			__get_user_x(__r2, __p, __e, __l, 4); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
		__e; \
	})

+#define get_user(x,p) \
+	({ \
+		might_fault(); \
+		__get_user_check(x,p); \
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);

-#define __put_user_x(__r2,__p,__e,__s) \
+#define __put_user_x(__r2,__p,__e,__l,__s) \
	   __asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%2", "r2") \
+		__asmeq("%3", "r1") \
		"bl	__put_user_" #__s \
		: "=&r" (__e) \
-		: "0" (__p), "r" (__r2) \
+		: "0" (__p), "r" (__r2), "r" (__l) \
		: "ip", "lr", "cc")

-#define put_user(x,p) \
+#define __put_user_check(x,p) \
	({ \
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __r2 asm("r2") = (x); \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		switch (sizeof(*(__p))) { \
		case 1: \
-			__put_user_x(__r2, __p, __e, 1); \
+			__put_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
-			__put_user_x(__r2, __p, __e, 2); \
+			__put_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
-			__put_user_x(__r2, __p, __e, 4); \
+			__put_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
-			__put_user_x(__r2, __p, __e, 8); \
+			__put_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __put_user_bad(); break; \
		} \
		__e; \
	})

+#define put_user(x,p) \
+	({ \
+		might_fault(); \
+		__put_user_check(x,p); \
+	 })
+
 #else /* CONFIG_MMU */

 /*
@@ -219,6 +245,7 @@ do { \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
+	might_fault(); \
	switch (sizeof(*(ptr))) { \
	case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
	case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
@@ -300,6 +327,7 @@ do { \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__chk_user_ptr(ptr); \
+	might_fault(); \
	switch (sizeof(*(ptr))) { \
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
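
A sketch of the guarantee the reworked macros provide: the thread's addr_limit now travels to the __get_user_N/__put_user_N helpers in r1, so a pointer that crosses it fails before being dereferenced, with or without CPU domains:

static int read_user_word(u32 __user *uptr, u32 *val)
{
	/* get_user() evaluates to 0 on success, -EFAULT on a bad pointer */
	if (get_user(*val, uptr))
		return -EFAULT;
	return 0;
}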

View File

@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
		arch >= ARM_DEBUG_ARCH_V7_1;
 }

+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
		/* Aligned */
		break;
	case 1:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
-	case 3:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
	default:
		ret = -EINVAL;
		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
	}

 out:
	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
			goto unlock;

		/* Check that the access type matches. */
-		access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-			 HW_BREAKPOINT_R;
-		if (!(access & hw_breakpoint_type(wp)))
-			goto unlock;
+		if (debug_exception_updates_fsr()) {
+			access = (fsr & ARM_FSR_ACCESS_MASK) ?
+				  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+		}

		/* We have a winner. */
		info->trigger = addr;
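
The validation above is exercised when a breakpoint is registered through the perf hw_breakpoint API; a sketch of a per-task byte watchpoint that passes the new checks because it supplies an overflow handler:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static struct perf_event *set_byte_watchpoint(struct task_struct *tsk,
					      unsigned long addr,
					      perf_overflow_handler_t cb)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_1;	/* single byte */
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, cb, NULL, tsk);
}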

View File

@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
			instr = *(u32 *) pc;
	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
		if (is_wide_instruction(instr)) {
			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
			instr <<= 16;
			instr |= instr2;
		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

+die_sig:
 #ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",

View File

@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
	pr_info("Switching to timer-based delay loop\n");
	lpj_fine			= freq / HZ;
+	loops_per_jiffy			= lpj_fine;
	arm_delay_ops.delay		= __timer_delay;
	arm_delay_ops.const_udelay	= __timer_const_udelay;
	arm_delay_ops.udelay		= __timer_udelay;

View File

@@ -16,8 +16,9 @@
 * __get_user_X
 *
 * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
 * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
 *		lr corrupted
 *
 * No other registers must be altered.  (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
 * Note also that it is intended that __get_user_bad is not global.
 */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>

 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
 ENDPROC(__get_user_1)

 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb)	r2, [r0]
-3: TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2: TUSER(ldrb)	r2, [r0], #1
-3: TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
	mov	r0, #0
	mov	pc, lr
 ENDPROC(__get_user_2)

 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)	r2, [r0]
	mov	r0, #0
	mov	pc, lr

View File

@@ -16,6 +16,7 @@
 * __put_user_X
 *
 * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
 *		r2, r3 contains the value
 * Outputs:	r0 is the error code
 *		lr corrupted
@@ -27,16 +28,19 @@
 * Note also that it is intended that __put_user_bad is not global.
 */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>

 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
 ENDPROC(__put_user_1)

 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)

 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
 ENDPROC(__put_user_4)

 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)	r2, [r0]
 6: TUSER(str)	r3, [r0, #4]

View File

@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
@@ -243,6 +241,6 @@ int __init mx25_clocks_init(void)
	clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
	clk_register_clkdev(clk[iim_ipg], "iim", NULL);

-	mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+	mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);

	return 0;
 }

View File

@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
	/* i.mx35 has the i.mx21 type uart */
	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");

View File

@@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void)
	imx31_add_mxc_nand(&armadillo5x0_nand_board_info);

	/* set NAND page size to 2k if not configured via boot mode pins */
-	__raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
+	__raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
+					(1 << 30), mx3_ccm_base + MXC_CCM_RCSR);

	/* RTC */
	/* Get RTC IRQ and register the chip */

View File

@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
	orion_time_set_base(TIMER_VIRT_BASE);
+
+	/*
+	 * Some Kirkwood devices allocate their coherent buffers from atomic
+	 * context. Increase size of atomic coherent pool to make sure such
+	 * allocations won't fail.
+	 */
+	init_dma_coherent_pool_size(SZ_1M);
 }

 int kirkwood_tclk;

View File

@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
	select OMAP_PACKAGE_CBB
	select REGULATOR_FIXED_VOLTAGE if REGULATOR

-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
	bool "OMAP3 Touch Book"
	depends on ARCH_OMAP3
	default y
+	select OMAP_PACKAGE_CBB

 config MACH_OMAP_3430SDP
	bool "OMAP 3430 SDP board"

View File

@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP)		+= board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35)		+= board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517)		+= board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)		+= board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)	+= board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK)		+= board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)		+= board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA)		+= board-omap4panda.o

View File

@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
	CLK(NULL,	"mmu_fck",		&mmu_fck,	CK_AM33XX),
	CLK(NULL,	"smartreflex0_fck",	&smartreflex0_fck,	CK_AM33XX),
	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt1_fck",		&timer1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt2_fck",		&timer2_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt3_fck",		&timer3_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt4_fck",		&timer4_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt5_fck",		&timer5_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt6_fck",		&timer6_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt7_fck",		&timer7_fck,	CK_AM33XX),
+	CLK(NULL,	"timer1_fck",		&timer1_fck,	CK_AM33XX),
+	CLK(NULL,	"timer2_fck",		&timer2_fck,	CK_AM33XX),
+	CLK(NULL,	"timer3_fck",		&timer3_fck,	CK_AM33XX),
+	CLK(NULL,	"timer4_fck",		&timer4_fck,	CK_AM33XX),
+	CLK(NULL,	"timer5_fck",		&timer5_fck,	CK_AM33XX),
+	CLK(NULL,	"timer6_fck",		&timer6_fck,	CK_AM33XX),
+	CLK(NULL,	"timer7_fck",		&timer7_fck,	CK_AM33XX),
	CLK(NULL,	"usbotg_fck",		&usbotg_fck,	CK_AM33XX),
	CLK(NULL,	"ieee5000_fck",		&ieee5000_fck,	CK_AM33XX),
	CLK(NULL,	"wdt1_fck",		&wdt1_fck,	CK_AM33XX),

View File

@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
		_clkdm_del_autodeps(clkdm);
 }

+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
	.clkdm_wakeup		= omap3_clkdm_wakeup,
	.clkdm_allow_idle	= omap3_clkdm_allow_idle,
	.clkdm_deny_idle	= omap3_clkdm_deny_idle,
-	.clkdm_clk_enable	= omap2_clkdm_clk_enable,
-	.clkdm_clk_disable	= omap2_clkdm_clk_disable,
+	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
 };

View File

@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)

 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_MASK				(1 << 0)

 /* CM_IDLEST_PLL_IVA2 */

View File

@@ -46,7 +46,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;

View File

@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
			_enable_sysc(oh);
		}
	} else {
+		_omap4_disable_module(oh);
		_disable_clocks(oh);
		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
			 oh->name, r);

View File

@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {

 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-	{ .name = "logic", .rst_shift = 0 },
-	{ .name = "seq0", .rst_shift = 1 },
-	{ .name = "seq1", .rst_shift = 2 },
+	{ .name = "logic", .rst_shift = 0, .st_shift = 8 },
+	{ .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+	{ .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };

 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
	.rst_lines	= omap3xxx_iva_resets,
	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_iva_resets),
	.main_clk	= "iva2_ck",
+	.prcm = {
+		.omap2 = {
+			.module_offs = OMAP3430_IVA2_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+		}
+	},
 };

 /* timer class */
/* timer class */ /* timer class */

View File

@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };

 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
	.master		= &omap44xx_dsp_hwmod,
	.slave		= &omap44xx_sl2if_hwmod,
	.clk		= "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };

 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
	.master		= &omap44xx_iva_hwmod,
	.slave		= &omap44xx_sl2if_hwmod,
	.clk		= "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };

 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
	.master		= &omap44xx_l3_main_2_hwmod,
	.slave		= &omap44xx_sl2if_hwmod,
	.clk		= "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_l4_abe__dmic,
	&omap44xx_l4_abe__dmic_dma,
	&omap44xx_dsp__iva,
-	&omap44xx_dsp__sl2if,
+	/* &omap44xx_dsp__sl2if, */
	&omap44xx_l4_cfg__dsp,
	&omap44xx_l3_main_2__dss,
	&omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_l4_per__i2c4,
	&omap44xx_l3_main_2__ipu,
	&omap44xx_l3_main_2__iss,
-	&omap44xx_iva__sl2if,
+	/* &omap44xx_iva__sl2if, */
	&omap44xx_l3_main_2__iva,
	&omap44xx_l4_wkup__kbd,
	&omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
	&omap44xx_l4_cfg__cm_core,
	&omap44xx_l4_wkup__prm,
	&omap44xx_l4_wkup__scrm,
-	&omap44xx_l3_main_2__sl2if,
+	/* &omap44xx_l3_main_2__sl2if, */
	&omap44xx_l4_abe__slimbus1,
	&omap44xx_l4_abe__slimbus1_dma,
	&omap44xx_l4_per__slimbus2,

View File

@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
	return 0;
 }

+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)

	return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+	return -ENODEV;
+}
+#endif

 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
						const char *fck_source)

View File

@@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = {
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
-		.start	= gic_spi(141),
+		.start	= gic_spi(140),
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
-		.start	= gic_spi(140),
+		.start	= gic_spi(141),
		.flags	= IORESOURCE_IRQ,
	},
 };

View File

@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;

View File

@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
	vunmap(cpu_addr);
 }

+#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+
 struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
-	struct page *page;
+	struct page **pages;
 };

 static struct dma_pool atomic_pool = {
-	.size = SZ_256K,
+	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };

 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);

+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+	/*
+	 * Catch any attempt to set the pool size too late.
+	 */
+	BUG_ON(atomic_pool.vaddr);
+
+	/*
+	 * Set architecture specific coherent pool size only if
+	 * it has not been changed by kernel command line parameter.
+	 */
+	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
+	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
	if (!bitmap)
		goto no_bitmap;

+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+
	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
					   &page, NULL);
	if (ptr) {
+		int i;
+
+		for (i = 0; i < nr_pages; i++)
+			pages[i] = page + i;
+
		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
-		pool->page = page;
+		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}
+
+no_pages:
	kfree(bitmap);
 no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->page + pageno;
+		*ret_page = pool->pages[pageno];
+	} else {
+		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+			    "Please increase it with coherent_pool= kernel parameter!\n",
+			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
 }

+static bool __in_atomic_pool(void *start, size_t size)
+{
+	struct dma_pool *pool = &atomic_pool;
+	void *end = start + size;
+	void *pool_start = pool->vaddr;
+	void *pool_end = pool->vaddr + pool->size;
+
+	if (start < pool_start || start >= pool_end)
+		return false;
+
+	if (end <= pool_end)
+		return true;
+
+	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+	     start, end - 1, pool_start, pool_end - 1);
+
+	return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

-	if (start < pool->vaddr || start > pool->vaddr + pool->size)
+	if (!__in_atomic_pool(start, size))
		return 0;

-	if (start + size > pool->vaddr + pool->size) {
-		WARN(1, "freeing wrong coherent size from pool\n");
-		return 0;
-	}
-
	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;
@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
	return 0;
 }

+static struct page **__atomic_get_pages(void *addr)
+{
+	struct dma_pool *pool = &atomic_pool;
+	struct page **pages = pool->pages;
+	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+	return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
	struct vm_struct *area;

+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
	return NULL;
 }

+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
	pages = __iommu_alloc_buffer(dev, size, gfp);
	if (!pages)
		return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		return;
	}

+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
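
Taken together, the hunks above make GFP_ATOMIC coherent allocations work behind an ARM IOMMU by serving them from the atomic pool; a sketch of the kind of call site this enables:

/* Safe even in interrupt context once the pool is sized adequately. */
static void *grab_atomic_dma_buf(struct device *dev, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, SZ_4K, handle, GFP_ATOMIC);
}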

View File

@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING	0x40000000

+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING	0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)	((mt) << 20)
 #define VM_ARM_MTYPE_MASK	(0x1f << 20)

View File

@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
-		if (__va(bank->start + bank->size) > vmalloc_min ||
-		    __va(bank->start + bank->size) < __va(bank->start)) {
+		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",

View File

@@ -98,6 +98,7 @@
 #define MX25_INT_UART1		(NR_IRQS_LEGACY + 45)
 #define MX25_INT_GPIO2		(NR_IRQS_LEGACY + 51)
 #define MX25_INT_GPIO1		(NR_IRQS_LEGACY + 52)
+#define MX25_INT_GPT1		(NR_IRQS_LEGACY + 54)
 #define MX25_INT_FEC		(NR_IRQS_LEGACY + 57)

 #define MX25_DMA_REQ_SSI2_RX1	22

View File

@@ -68,6 +68,7 @@

 static unsigned long omap_sram_start;
 static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
 static unsigned long omap_sram_size;
 static void __iomem *omap_sram_ceil;

@@ -106,6 +107,7 @@ static int is_sram_locked(void)
 */
 static void __init omap_detect_sram(void)
 {
+	omap_sram_skip = SRAM_BOOTLOADER_SZ;
	if (cpu_class_is_omap2()) {
		if (is_sram_locked()) {
			if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
				if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
				    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
					omap_sram_size = 0x7000; /* 28K */
+					omap_sram_skip += SZ_16K;
				} else {
					omap_sram_size = 0x8000; /* 32K */
				}
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
		return;

 #ifdef CONFIG_OMAP4_ERRATA_I688
+	if (cpu_is_omap44xx()) {
		omap_sram_start += PAGE_SIZE;
		omap_sram_size -= SZ_16K;
+	}
 #endif

	if (cpu_is_omap34xx()) {
		/*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
	 * Looks like we need to preserve some bootloader code at the
	 * beginning of SRAM for jumping to flash for reboot to work...
	 */
-	memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
-		  omap_sram_size - SRAM_BOOTLOADER_SZ);
+	memset_io(omap_sram_base + omap_sram_skip, 0,
+		  omap_sram_size - omap_sram_skip);
 }

 /*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
 {
	unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;

-	available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+	available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);

	if (size > available) {
		pr_err("Not enough space in SRAM\n");

View File

@@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)

 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
+	unsigned long flags;
	int ret;

	if (IS_ERR(clk))
@@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
	if (clk->ops == NULL || clk->ops->set_rate == NULL)
		return -EINVAL;

-	spin_lock(&clocks_lock);
+	spin_lock_irqsave(&clocks_lock, flags);
	ret = (clk->ops->set_rate)(clk, rate);
-	spin_unlock(&clocks_lock);
+	spin_unlock_irqrestore(&clocks_lock, flags);

	return ret;
 }
@@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk)

 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
+	unsigned long flags;
	int ret = 0;

	if (IS_ERR(clk))
		return -EINVAL;

-	spin_lock(&clocks_lock);
+	spin_lock_irqsave(&clocks_lock, flags);
	if (clk->ops && clk->ops->set_parent)
		ret = (clk->ops->set_parent)(clk, parent);
-	spin_unlock(&clocks_lock);
+	spin_unlock_irqrestore(&clocks_lock, flags);

	return ret;
 }

View File

@@ -38,6 +38,7 @@ config BLACKFIN
	select GENERIC_ATOMIC64
	select GENERIC_IRQ_PROBE
	select IRQ_PER_CPU if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
	select GENERIC_SMP_IDLE_THREAD
	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS

View File

@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE    += -mlong-calls
 LDFLAGS                 += -m elf32bfin
-KALLSYMS         += --symbol-prefix=_

 KBUILD_DEFCONFIG := BF537-STAMP_defconfig

View File

@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()  blackfin_core_id()

 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);

View File

@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
#define BFIN_IPI_TIMER 0 enum ipi_message_type {
#define BFIN_IPI_RESCHEDULE 1 BFIN_IPI_TIMER,
#define BFIN_IPI_CALL_FUNC 2 BFIN_IPI_RESCHEDULE,
#define BFIN_IPI_CPU_STOP 3 BFIN_IPI_CALL_FUNC,
BFIN_IPI_CALL_FUNC_SINGLE,
BFIN_IPI_CPU_STOP,
};
struct blackfin_flush_data { struct blackfin_flush_data {
unsigned long start; unsigned long start;
@ -60,35 +63,20 @@ struct blackfin_flush_data {
void *secondary_stack; void *secondary_stack;
struct smp_call_struct {
void (*func)(void *info);
void *info;
int wait;
cpumask_t *waitmask;
};
static struct blackfin_flush_data smp_flush_data; static struct blackfin_flush_data smp_flush_data;
static DEFINE_SPINLOCK(stop_lock); static DEFINE_SPINLOCK(stop_lock);
struct ipi_message {
unsigned long type;
struct smp_call_struct call_struct;
};
/* A magic number - stress test shows this is safe for common cases */ /* A magic number - stress test shows this is safe for common cases */
#define BFIN_IPI_MSGQ_LEN 5 #define BFIN_IPI_MSGQ_LEN 5
/* Simple FIFO buffer, overflow leads to panic */ /* Simple FIFO buffer, overflow leads to panic */
struct ipi_message_queue { struct ipi_data {
spinlock_t lock;
unsigned long count; unsigned long count;
unsigned long head; /* head of the queue */ unsigned long bits;
struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
}; };
static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
static void ipi_cpu_stop(unsigned int cpu) static void ipi_cpu_stop(unsigned int cpu)
{ {
@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
blackfin_icache_flush_range(fdata->start, fdata->end); blackfin_icache_flush_range(fdata->start, fdata->end);
} }
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
int wait;
void (*func)(void *info);
void *info;
func = msg->call_struct.func;
info = msg->call_struct.info;
wait = msg->call_struct.wait;
func(info);
if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
/*
* 'wait' usually means synchronization between CPUs.
* Invalidate D cache in case shared data was changed
* by func() to ensure cache coherence.
*/
resync_core_dcache();
#endif
cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
}
}
/* Use IRQ_SUPPLE_0 to request reschedule. /* Use IRQ_SUPPLE_0 to request reschedule.
* When returning from interrupt to user space, * When returning from interrupt to user space,
* there is a chance to reschedule */ * there is a chance to reschedule */
@ -172,152 +138,95 @@ void ipi_timer(void)
static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{ {
struct ipi_message *msg; struct ipi_data *bfin_ipi_data;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned long flags; unsigned long pending;
unsigned long msg;
platform_clear_ipi(cpu, IRQ_SUPPLE_1); platform_clear_ipi(cpu, IRQ_SUPPLE_1);
msg_queue = &__get_cpu_var(ipi_msg_queue); bfin_ipi_data = &__get_cpu_var(bfin_ipi);
spin_lock_irqsave(&msg_queue->lock, flags); while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
msg = 0;
do {
msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
switch (msg) {
case BFIN_IPI_TIMER:
ipi_timer();
break;
case BFIN_IPI_RESCHEDULE:
scheduler_ipi();
break;
case BFIN_IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;
while (msg_queue->count) { case BFIN_IPI_CALL_FUNC_SINGLE:
msg = &msg_queue->ipi_message[msg_queue->head]; generic_smp_call_function_single_interrupt();
switch (msg->type) { break;
case BFIN_IPI_TIMER:
ipi_timer(); case BFIN_IPI_CPU_STOP:
break; ipi_cpu_stop(cpu);
case BFIN_IPI_RESCHEDULE: break;
scheduler_ipi(); }
break; } while (msg < BITS_PER_LONG);
case BFIN_IPI_CALL_FUNC:
ipi_call_function(cpu, msg); smp_mb();
break;
case BFIN_IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
cpu, msg->type);
break;
}
msg_queue->head++;
msg_queue->head %= BFIN_IPI_MSGQ_LEN;
msg_queue->count--;
} }
spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static void ipi_queue_init(void) static void bfin_ipi_init(void)
{ {
unsigned int cpu; unsigned int cpu;
struct ipi_message_queue *msg_queue; struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
msg_queue = &per_cpu(ipi_msg_queue, cpu); bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
spin_lock_init(&msg_queue->lock); bfin_ipi_data->bits = 0;
msg_queue->count = 0; bfin_ipi_data->count = 0;
msg_queue->head = 0;
} }
} }
static inline void smp_send_message(cpumask_t callmap, unsigned long type, void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
void (*func) (void *info), void *info, int wait)
{ {
unsigned int cpu; unsigned int cpu;
struct ipi_message_queue *msg_queue; struct ipi_data *bfin_ipi_data;
struct ipi_message *msg; unsigned long flags;
unsigned long flags, next_msg;
cpumask_t waitmask; /* waitmask is shared by all cpus */
cpumask_copy(&waitmask, &callmap); local_irq_save(flags);
for_each_cpu(cpu, &callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu); for_each_cpu(cpu, cpumask) {
spin_lock_irqsave(&msg_queue->lock, flags); bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { smp_mb();
next_msg = (msg_queue->head + msg_queue->count) set_bit(msg, &bfin_ipi_data->bits);
% BFIN_IPI_MSGQ_LEN; bfin_ipi_data->count++;
msg = &msg_queue->ipi_message[next_msg];
msg->type = type;
if (type == BFIN_IPI_CALL_FUNC) {
msg->call_struct.func = func;
msg->call_struct.info = info;
msg->call_struct.wait = wait;
msg->call_struct.waitmask = &waitmask;
}
msg_queue->count++;
} else
panic("IPI message queue overflow\n");
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
} }
if (wait) { local_irq_restore(flags);
while (!cpumask_empty(&waitmask))
blackfin_dcache_invalidate_range(
(unsigned long)(&waitmask),
(unsigned long)(&waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
/*
* Invalidate D cache in case shared data was changed by
* other processors to ensure cache coherence.
*/
resync_core_dcache();
#endif
}
} }
int smp_call_function(void (*func)(void *info), void *info, int wait) void arch_send_call_function_single_ipi(int cpu)
{ {
cpumask_t callmap; send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
preempt_disable();
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
preempt_enable();
return 0;
} }
EXPORT_SYMBOL_GPL(smp_call_function);
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, void arch_send_call_function_ipi_mask(const struct cpumask *mask)
int wait)
{ {
unsigned int cpu = cpuid; send_ipi(mask, BFIN_IPI_CALL_FUNC);
cpumask_t callmap;
if (cpu_is_offline(cpu))
return 0;
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
return 0;
} }
EXPORT_SYMBOL_GPL(smp_call_function_single);
void smp_send_reschedule(int cpu) void smp_send_reschedule(int cpu)
{ {
cpumask_t callmap; send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
/* simply trigger an ipi */
cpumask_clear(&callmap);
cpumask_set_cpu(cpu, &callmap);
smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
return; return;
} }
void smp_send_msg(const struct cpumask *mask, unsigned long type) void smp_send_msg(const struct cpumask *mask, unsigned long type)
{ {
smp_send_message(*mask, type, NULL, NULL, 0); send_ipi(mask, type);
} }
void smp_timer_broadcast(const struct cpumask *mask) void smp_timer_broadcast(const struct cpumask *mask)
@ -333,7 +242,7 @@ void smp_send_stop(void)
cpumask_copy(&callmap, cpu_online_mask); cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap); cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap)) if (!cpumask_empty(&callmap))
smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); send_ipi(&callmap, BFIN_IPI_CPU_STOP);
preempt_enable(); preempt_enable();
@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
platform_prepare_cpus(max_cpus); platform_prepare_cpus(max_cpus);
ipi_queue_init(); bfin_ipi_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
} }
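
A hedged sketch of the scheme the rewrite above moves to; all names are illustrative. Each CPU gets a single pending-bits word instead of a locked message FIFO, so senders can never overflow a queue and the handler drains everything with one atomic xchg():

#include <linux/bitops.h>

enum { EX_IPI_RESCHED, EX_IPI_CALL };

static unsigned long example_pending;		/* one word per CPU in the real driver */

static void example_send(int msg)
{
	set_bit(msg, &example_pending);		/* atomic set, no spinlock required */
	/* ...then raise the target CPU's supplemental interrupt... */
}

static void example_handler(void)
{
	unsigned long pending;

	/* swap the word to zero and service every bit that was set */
	while ((pending = xchg(&example_pending, 0)) != 0) {
		if (pending & (1UL << EX_IPI_RESCHED))
			/* scheduler_ipi() in the real handler */;
		if (pending & (1UL << EX_IPI_CALL))
			/* generic_smp_call_function_interrupt() */;
	}
}

Delegating BFIN_IPI_CALL_FUNC and BFIN_IPI_CALL_FUNC_SINGLE to the generic SMP helpers (selected via USE_GENERIC_SMP_HELPERS above) is what allows the hand-rolled smp_call_function()/smp_call_function_single() copies to be deleted.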

View File

@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return pte; return pte;
} }
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(ptep);
mm->context.flush_mm = 1;
pmd_clear((pmd_t *) ptep);
return pte;
}
static inline void __pmd_csp(pmd_t *pmdp) static inline void __pmd_csp(pmd_t *pmdp)
{ {
register unsigned long reg2 asm("2") = pmd_val(*pmdp); register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
__pmd_csp(pmdp); __pmd_csp(pmdp);
} }
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get(ptep);
huge_ptep_invalidate(mm, addr, ptep);
return pte;
}
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \ ({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \ int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \ ({ \
pte_t __pte = huge_ptep_get(__ptep); \ pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \ if (pte_write(__pte)) { \
(__mm)->context.flush_mm = 1; \ huge_ptep_invalidate(__mm, __addr, __ptep); \
if (atomic_read(&(__mm)->context.attach_count) > 1 || \
(__mm) != current->active_mm) \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \ set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \ huge_pte_wrprotect(__pte)); \
} \ } \

View File

@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
static inline void __tlb_flush_mm_cond(struct mm_struct * mm) static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{ {
spin_lock(&mm->page_table_lock);
if (mm->context.flush_mm) { if (mm->context.flush_mm) {
__tlb_flush_mm(mm); __tlb_flush_mm(mm);
mm->context.flush_mm = 0; mm->context.flush_mm = 0;
} }
spin_unlock(&mm->page_table_lock);
} }
/* /*

View File

@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE) if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE; elf_hwcap |= HWCAP_S390_HPAGE;
#if defined(CONFIG_64BIT)
/* /*
* 64-bit register support for 31-bit processes * 64-bit register support for 31-bit processes
* HWCAP_S390_HIGH_GPRS is bit 9. * HWCAP_S390_HIGH_GPRS is bit 9.
*/ */
elf_hwcap |= HWCAP_S390_HIGH_GPRS; elf_hwcap |= HWCAP_S390_HIGH_GPRS;
#endif
get_cpu_id(&cpu_id); get_cpu_id(&cpu_id);
switch (cpu_id.machine) { switch (cpu_id.machine) {

View File

@ -2,69 +2,82 @@
* User access functions based on page table walks for enhanced * User access functions based on page table walks for enhanced
* system layout without hardware support. * system layout without hardware support.
* *
* Copyright IBM Corp. 2006 * Copyright IBM Corp. 2006, 2012
* Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/ */
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/futex.h> #include <asm/futex.h>
#include "uaccess.h" #include "uaccess.h"
static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
/*
* Returns kernel address for user virtual address. If the returned address is
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
* contains the (negative) exception code.
*/
static __always_inline unsigned long follow_table(struct mm_struct *mm,
unsigned long addr, int write)
{ {
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
pte_t *ptep;
pgd = pgd_offset(mm, addr); pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
return (pte_t *) 0x3a; return -0x3aUL;
pud = pud_offset(pgd, addr); pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud))) if (pud_none(*pud) || unlikely(pud_bad(*pud)))
return (pte_t *) 0x3b; return -0x3bUL;
pmd = pmd_offset(pud, addr); pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) if (pmd_none(*pmd))
return (pte_t *) 0x10; return -0x10UL;
if (pmd_huge(*pmd)) {
if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
return -0x04UL;
return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
}
if (unlikely(pmd_bad(*pmd)))
return -0x10UL;
return pte_offset_map(pmd, addr); ptep = pte_offset_map(pmd, addr);
if (!pte_present(*ptep))
return -0x11UL;
if (write && !pte_write(*ptep))
return -0x04UL;
return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
} }
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user) size_t n, int write_user)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long offset, pfn, done, size; unsigned long offset, done, size, kaddr;
pte_t *pte;
void *from, *to; void *from, *to;
done = 0; done = 0;
retry: retry:
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
do { do {
pte = follow_table(mm, uaddr); kaddr = follow_table(mm, uaddr, write_user);
if ((unsigned long) pte < 0x1000) if (IS_ERR_VALUE(kaddr))
goto fault; goto fault;
if (!pte_present(*pte)) {
pte = (pte_t *) 0x11;
goto fault;
} else if (write_user && !pte_write(*pte)) {
pte = (pte_t *) 0x04;
goto fault;
}
pfn = pte_pfn(*pte); offset = uaddr & ~PAGE_MASK;
offset = uaddr & (PAGE_SIZE - 1);
size = min(n - done, PAGE_SIZE - offset); size = min(n - done, PAGE_SIZE - offset);
if (write_user) { if (write_user) {
to = (void *)((pfn << PAGE_SHIFT) + offset); to = (void *) kaddr;
from = kptr + done; from = kptr + done;
} else { } else {
from = (void *)((pfn << PAGE_SHIFT) + offset); from = (void *) kaddr;
to = kptr + done; to = kptr + done;
} }
memcpy(to, from, size); memcpy(to, from, size);
@ -75,7 +88,7 @@ retry:
return n - done; return n - done;
fault: fault:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
if (__handle_fault(uaddr, (unsigned long) pte, write_user)) if (__handle_fault(uaddr, -kaddr, write_user))
return n - done; return n - done;
goto retry; goto retry;
} }
@ -84,27 +97,22 @@ fault:
* Do DAT for user address by page table walk, return kernel address. * Do DAT for user address by page table walk, return kernel address.
* This function needs to be called with current->mm->page_table_lock held. * This function needs to be called with current->mm->page_table_lock held.
*/ */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr) static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
int write)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long pfn; unsigned long kaddr;
pte_t *pte;
int rc; int rc;
retry: retry:
pte = follow_table(mm, uaddr); kaddr = follow_table(mm, uaddr, write);
if ((unsigned long) pte < 0x1000) if (IS_ERR_VALUE(kaddr))
goto fault; goto fault;
if (!pte_present(*pte)) {
pte = (pte_t *) 0x11;
goto fault;
}
pfn = pte_pfn(*pte); return kaddr;
return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
fault: fault:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
rc = __handle_fault(uaddr, (unsigned long) pte, 0); rc = __handle_fault(uaddr, -kaddr, write);
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
if (!rc) if (!rc)
goto retry; goto retry;
@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)
static size_t strnlen_user_pt(size_t count, const char __user *src) static size_t strnlen_user_pt(size_t count, const char __user *src)
{ {
char *addr;
unsigned long uaddr = (unsigned long) src; unsigned long uaddr = (unsigned long) src;
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long offset, pfn, done, len; unsigned long offset, done, len, kaddr;
pte_t *pte;
size_t len_str; size_t len_str;
if (segment_eq(get_fs(), KERNEL_DS)) if (segment_eq(get_fs(), KERNEL_DS))
@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry: retry:
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
do { do {
pte = follow_table(mm, uaddr); kaddr = follow_table(mm, uaddr, 0);
if ((unsigned long) pte < 0x1000) if (IS_ERR_VALUE(kaddr))
goto fault; goto fault;
if (!pte_present(*pte)) {
pte = (pte_t *) 0x11;
goto fault;
}
pfn = pte_pfn(*pte); offset = uaddr & ~PAGE_MASK;
offset = uaddr & (PAGE_SIZE-1);
addr = (char *)(pfn << PAGE_SHIFT) + offset;
len = min(count - done, PAGE_SIZE - offset); len = min(count - done, PAGE_SIZE - offset);
len_str = strnlen(addr, len); len_str = strnlen((char *) kaddr, len);
done += len_str; done += len_str;
uaddr += len_str; uaddr += len_str;
} while ((len_str == len) && (done < count)); } while ((len_str == len) && (done < count));
@ -192,7 +192,7 @@ retry:
return done + 1; return done + 1;
fault: fault:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
if (__handle_fault(uaddr, (unsigned long) pte, 0)) if (__handle_fault(uaddr, -kaddr, 0))
return 0; return 0;
goto retry; goto retry;
} }
@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
const void __user *from) const void __user *from)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, unsigned long offset_max, uaddr, done, size, error_code;
uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from; unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to; unsigned long uaddr_to = (unsigned long) to;
pte_t *pte_from, *pte_to; unsigned long kaddr_to, kaddr_from;
int write_user; int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) { if (segment_eq(get_fs(), KERNEL_DS)) {
@ -242,38 +241,23 @@ retry:
do { do {
write_user = 0; write_user = 0;
uaddr = uaddr_from; uaddr = uaddr_from;
pte_from = follow_table(mm, uaddr_from); kaddr_from = follow_table(mm, uaddr_from, 0);
error_code = (unsigned long) pte_from; error_code = kaddr_from;
if (error_code < 0x1000) if (IS_ERR_VALUE(error_code))
goto fault; goto fault;
if (!pte_present(*pte_from)) {
error_code = 0x11;
goto fault;
}
write_user = 1; write_user = 1;
uaddr = uaddr_to; uaddr = uaddr_to;
pte_to = follow_table(mm, uaddr_to); kaddr_to = follow_table(mm, uaddr_to, 1);
error_code = (unsigned long) pte_to; error_code = (unsigned long) kaddr_to;
if (error_code < 0x1000) if (IS_ERR_VALUE(error_code))
goto fault; goto fault;
if (!pte_present(*pte_to)) {
error_code = 0x11;
goto fault;
} else if (!pte_write(*pte_to)) {
error_code = 0x04;
goto fault;
}
pfn_from = pte_pfn(*pte_from); offset_max = max(uaddr_from & ~PAGE_MASK,
pfn_to = pte_pfn(*pte_to); uaddr_to & ~PAGE_MASK);
offset_from = uaddr_from & (PAGE_SIZE-1);
offset_to = uaddr_from & (PAGE_SIZE-1);
offset_max = max(offset_from, offset_to);
size = min(n - done, PAGE_SIZE - offset_max); size = min(n - done, PAGE_SIZE - offset_max);
memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to, memcpy((void *) kaddr_to, (void *) kaddr_from, size);
(void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
done += size; done += size;
uaddr_from += size; uaddr_from += size;
uaddr_to += size; uaddr_to += size;
@ -282,7 +266,7 @@ retry:
return n - done; return n - done;
fault: fault:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
if (__handle_fault(uaddr, error_code, write_user)) if (__handle_fault(uaddr, -error_code, write_user))
return n - done; return n - done;
goto retry; goto retry;
} }
@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return __futex_atomic_op_pt(op, uaddr, oparg, old); return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock); spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *) uaddr = (u32 __force __user *)
__dat_user_addr((__force unsigned long) uaddr); __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) { if (!uaddr) {
spin_unlock(&current->mm->page_table_lock); spin_unlock(&current->mm->page_table_lock);
return -EFAULT; return -EFAULT;
@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock); spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *) uaddr = (u32 __force __user *)
__dat_user_addr((__force unsigned long) uaddr); __dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) { if (!uaddr) {
spin_unlock(&current->mm->page_table_lock); spin_unlock(&current->mm->page_table_lock);
return -EFAULT; return -EFAULT;
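
A sketch of the return convention the new follow_table() uses; the names below are illustrative. A single unsigned long carries either the translated kernel address or, in the top 4095 values of the address space, a negated exception code that IS_ERR_VALUE() recognises, which is why every caller above can test once and hand -kaddr straight to __handle_fault():

#include <linux/err.h>
#include <linux/errno.h>

static unsigned long example_translate(unsigned long addr, int present)
{
	if (!present)
		return -0x11UL;		/* page-translation exception code, negated */
	return addr;			/* otherwise: a usable kernel address */
}

static int example_access(unsigned long uaddr)
{
	unsigned long kaddr = example_translate(uaddr, 1);

	if (IS_ERR_VALUE(kaddr))
		return -EFAULT;		/* the real code retries after __handle_fault(uaddr, -kaddr, ...) */
	/* ...memcpy() through (void *) kaddr... */
	return 0;
}

Folding the pte_present()/pte_write() checks into follow_table() also lets it return large-page addresses (the pmd_huge() branch), which the old pte-pointer interface could not express.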

View File

@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
if (*offset) if (*offset)
return -EINVAL; return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count); retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval) if (retval <= 0)
return retval; return retval;
if (val < oprofile_min_interval) if (val < oprofile_min_interval)
oprofile_hw_interval = oprofile_min_interval; oprofile_hw_interval = oprofile_min_interval;
@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
return -EINVAL; return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count); retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval) if (retval <= 0)
return retval; return retval;
if (val != 0) if (val != 0)
return -EINVAL; return -EINVAL;
@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
return -EINVAL; return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count); retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval) if (retval <= 0)
return retval; return retval;
if (val != 0 && val != 1) if (val != 0 && val != 1)
@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
return -EINVAL; return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count); retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval) if (retval <= 0)
return retval; return retval;
if (val != 0 && val != 1) if (val != 0 && val != 1)
@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
return -EINVAL; return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count); retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval) if (retval <= 0)
return retval; return retval;
if (val != 0 && val != 1) if (val != 0 && val != 1)
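
A hedged sketch of why these checks flip from "if (retval)" to "if (retval <= 0)"; the helper below is a stand-in for oprofilefs_ulong_from_user(), which returns the number of bytes it consumed on success, 0 for an empty write, and a negative errno on failure, so the old truthiness test made every successful write bail out early:

#include <linux/types.h>

static ssize_t example_ulong_from_user(unsigned long *val,
				       const char __user *buf, size_t count)
{
	if (!count)
		return 0;
	*val = 1;		/* parsing elided; -EFAULT on a failed copy */
	return count;		/* success is always a positive byte count */
}

static ssize_t example_attr_write(const char __user *buf, size_t count,
				  unsigned long *val)
{
	ssize_t retval = example_ulong_from_user(val, buf, count);

	if (retval <= 0)	/* pass through only errors and empty writes */
		return retval;
	/* ...validate *val, apply it, then report the bytes consumed... */
	return count;
}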

View File

@ -933,7 +933,7 @@ ret_with_reschedule:
pta restore_all, tr1 pta restore_all, tr1
movi _TIF_SIGPENDING, r8 movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8 and r8, r7, r8
pta work_notifysig, tr0 pta work_notifysig, tr0
bne r8, ZERO, tr0 bne r8, ZERO, tr0

View File

@ -139,7 +139,7 @@ work_pending:
! r8: current_thread_info ! r8: current_thread_info
! t: result of "tst #_TIF_NEED_RESCHED, r0" ! t: result of "tst #_TIF_NEED_RESCHED, r0"
bf/s work_resched bf/s work_resched
tst #_TIF_SIGPENDING, r0 tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0
work_notifysig: work_notifysig:
bt/s __restore_all bt/s __restore_all
mov r15, r4 mov r15, r4

View File

@ -48,9 +48,7 @@ void *module_alloc(unsigned long size)
return NULL; return NULL;
ret = module_map(size); ret = module_map(size);
if (!ret) if (ret)
ret = ERR_PTR(-ENOMEM);
else
memset(ret, 0, size); memset(ret, 0, size);
return ret; return ret;
@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
v = sym->st_value + rel[i].r_addend; v = sym->st_value + rel[i].r_addend;
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) { switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
case R_SPARC_DISP32:
v -= (Elf_Addr) location;
*loc32 = v;
break;
#ifdef CONFIG_SPARC64 #ifdef CONFIG_SPARC64
case R_SPARC_64: case R_SPARC_64:
location[0] = v >> 56; location[0] = v >> 56;
@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
location[7] = v >> 0; location[7] = v >> 0;
break; break;
case R_SPARC_DISP32:
v -= (Elf_Addr) location;
*loc32 = v;
break;
case R_SPARC_WDISP19: case R_SPARC_WDISP19:
v -= (Elf_Addr) location; v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) | *loc32 = (*loc32 & ~0x7ffff) |
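
A hedged sketch of the convention the first hunk restores; the allocator below is illustrative. The core module loader tests module_alloc() results against NULL, not with IS_ERR(), so wrapping the failure in ERR_PTR(-ENOMEM) made an allocation failure look like a valid pointer:

#include <linux/string.h>

static void *example_module_alloc(unsigned long size,
				  void *(*map)(unsigned long))
{
	void *ret = map(size);		/* stand-in for module_map() */

	if (ret)
		memset(ret, 0, size);	/* zero only what really exists */
	return ret;			/* NULL on failure, never ERR_PTR() */
}

The second hunk is independent: moving R_SPARC_DISP32 above the #ifdef CONFIG_SPARC64 block lets 32-bit sparc handle that relocation too instead of falling through to the unknown-type path.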

View File

@ -746,10 +746,10 @@ config SWIOTLB
def_bool y if X86_64 def_bool y if X86_64
---help--- ---help---
Support for software bounce buffers used on x86-64 systems Support for software bounce buffers used on x86-64 systems
which don't have a hardware IOMMU (e.g. the current generation which don't have a hardware IOMMU. Using this, PCI devices
of Intel's x86-64 CPUs). Using this PCI devices which can only which can only access 32-bits of memory can be used on systems
access 32-bits of memory can be used on systems with more than with more than 3 GB of memory.
3 GB of memory. If unsure, say Y. If unsure, say Y.
config IOMMU_HELPER config IOMMU_HELPER
def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU)

View File

@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page, extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op); struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page, bool clear_pte); extern int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn); extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);

View File

@ -586,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[];
extern struct event_constraint intel_snb_pebs_event_constraints[]; extern struct event_constraint intel_snb_pebs_event_constraints[];
extern struct event_constraint intel_ivb_pebs_event_constraints[];
struct event_constraint *intel_pebs_constraints(struct perf_event *event); struct event_constraint *intel_pebs_constraints(struct perf_event *event);
void intel_pmu_pebs_enable(struct perf_event *event); void intel_pmu_pebs_enable(struct perf_event *event);

View File

@ -209,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static const struct perf_event_attr ibs_notsupp = {
.exclude_user = 1,
.exclude_kernel = 1,
.exclude_hv = 1,
.exclude_idle = 1,
.exclude_host = 1,
.exclude_guest = 1,
};
static int perf_ibs_init(struct perf_event *event) static int perf_ibs_init(struct perf_event *event)
{ {
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
@ -229,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event)
if (event->pmu != &perf_ibs->pmu) if (event->pmu != &perf_ibs->pmu)
return -ENOENT; return -ENOENT;
if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
return -EINVAL;
if (config & ~perf_ibs->config_mask) if (config & ~perf_ibs->config_mask)
return -EINVAL; return -EINVAL;
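
A sketch of the mask trick behind ibs_notsupp, using illustrative types. The exclude_* knobs of perf_event_attr sit in one bitfield word, so a template with every unsupported bit set can reject a request with a single AND instead of six separate comparisons:

#include <linux/errno.h>

struct example_attr {
	unsigned int exclude_user:1;
	unsigned int exclude_kernel:1;
	unsigned int exclude_hv:1;
};

/* reinterpret the bitfield word as a plain integer, as perf_flags() does */
#define example_flags(attr) (*(const unsigned int *)(attr))

static const struct example_attr example_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
};

static int example_check(const struct example_attr *attr)
{
	if (example_flags(attr) & example_flags(&example_notsupp))
		return -EINVAL;		/* a filter IBS cannot honour was requested */
	return 0;
}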

View File

@ -2008,6 +2008,7 @@ __init int intel_pmu_init(void)
break; break;
case 28: /* Atom */ case 28: /* Atom */
case 54: /* Cedarview */
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids)); sizeof(hw_cache_event_ids));
@ -2047,7 +2048,6 @@ __init int intel_pmu_init(void)
case 42: /* SandyBridge */ case 42: /* SandyBridge */
case 45: /* SandyBridge, "Romley-EP" */ case 45: /* SandyBridge, "Romley-EP" */
x86_add_quirk(intel_sandybridge_quirk); x86_add_quirk(intel_sandybridge_quirk);
case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids)); sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@ -2072,6 +2072,29 @@ __init int intel_pmu_init(void)
pr_cont("SandyBridge events, "); pr_cont("SandyBridge events, ");
break; break;
case 58: /* IvyBridge */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
sizeof(hw_cache_extra_regs));
intel_pmu_lbr_init_snb();
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
pr_cont("IvyBridge events, ");
break;
default: default:
switch (x86_pmu.version) { switch (x86_pmu.version) {

View File

@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END EVENT_CONSTRAINT_END
}; };
struct event_constraint intel_ivb_pebs_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
EVENT_CONSTRAINT_END
};
struct event_constraint *intel_pebs_constraints(struct perf_event *event) struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{ {
struct event_constraint *c; struct event_constraint *c;

View File

@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void)
* to have an operational LBR which can freeze * to have an operational LBR which can freeze
* on PMU interrupt * on PMU interrupt
*/ */
if (boot_cpu_data.x86_mask < 10) { if (boot_cpu_data.x86_model == 28
&& boot_cpu_data.x86_mask < 10) {
pr_cont("LBR disabled due to erratum"); pr_cont("LBR disabled due to erratum");
return; return;
} }

View File

@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
} }
} }
static struct uncore_event_desc snb_uncore_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
{ /* end: all zeroes */ },
};
static struct attribute *snb_uncore_formats_attr[] = { static struct attribute *snb_uncore_formats_attr[] = {
&format_attr_event.attr, &format_attr_event.attr,
&format_attr_umask.attr, &format_attr_umask.attr,
@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = {
.constraints = snb_uncore_cbox_constraints, .constraints = snb_uncore_cbox_constraints,
.ops = &snb_uncore_msr_ops, .ops = &snb_uncore_msr_ops,
.format_group = &snb_uncore_format_group, .format_group = &snb_uncore_format_group,
.event_descs = snb_uncore_events,
}; };
static struct intel_uncore_type *snb_msr_uncores[] = { static struct intel_uncore_type *snb_msr_uncores[] = {

View File

@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf,
if (do_microcode_update(buf, len) == 0) if (do_microcode_update(buf, len) == 0)
ret = (ssize_t)len; ret = (ssize_t)len;
if (ret > 0)
perf_check_microcode();
mutex_unlock(&microcode_mutex); mutex_unlock(&microcode_mutex);
put_online_cpus(); put_online_cpus();

View File

@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
if (val & 0x10) { if (val & 0x10) {
u8 edge_irr = s->irr & ~s->elcr; u8 edge_irr = s->irr & ~s->elcr;
int i; int i;
bool found; bool found = false;
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
s->init4 = val & 1; s->init4 = val & 1;

View File

@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
static int alloc_apic_access_page(struct kvm *kvm) static int alloc_apic_access_page(struct kvm *kvm)
{ {
struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem; struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0; int r = 0;
@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
if (r) if (r)
goto out; goto out;
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); page = gfn_to_page(kvm, 0xfee00);
if (is_error_page(page)) {
r = -EFAULT;
goto out;
}
kvm->arch.apic_access_page = page;
out: out:
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
return r; return r;
@ -3641,6 +3648,7 @@ out:
static int alloc_identity_pagetable(struct kvm *kvm) static int alloc_identity_pagetable(struct kvm *kvm)
{ {
struct page *page;
struct kvm_userspace_memory_region kvm_userspace_mem; struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0; int r = 0;
@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
if (r) if (r)
goto out; goto out;
kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); if (is_error_page(page)) {
r = -EFAULT;
goto out;
}
kvm->arch.ept_identity_pagetable = page;
out: out:
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
return r; return r;
@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
/* Exposing INVPCID only when PCID is exposed */ /* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0); best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() && if (vmx_invpcid_supported() &&
best && (best->ecx & bit(X86_FEATURE_INVPCID)) && best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) { guest_cpuid_has_pcid(vcpu)) {
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control); exec_control);
if (best) if (best)
best->ecx &= ~bit(X86_FEATURE_INVPCID); best->ebx &= ~bit(X86_FEATURE_INVPCID);
} }
} }
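
A minimal sketch of the pattern both allocation hunks adopt; example_cache() and its parameters are illustrative. gfn_to_page() does not return NULL on failure, it returns a distinguished error page, so the result has to be screened with is_error_page() before being cached in a long-lived pointer:

#include <linux/kvm_host.h>

static int example_cache(struct kvm *kvm, gfn_t gfn, struct page **slot)
{
	struct page *page = gfn_to_page(kvm, gfn);

	if (is_error_page(page))
		return -EFAULT;		/* never cache the error marker */

	*slot = page;			/* safe to dereference later */
	return 0;
}

The INVPCID hunks in the same file are a separate fix: the feature bit is reported in CPUID leaf 7 EBX, so testing and clearing best->ecx masked the wrong register.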

View File

@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu); !kvm_event_needs_reinjection(vcpu);
} }
static void vapic_enter(struct kvm_vcpu *vcpu) static int vapic_enter(struct kvm_vcpu *vcpu)
{ {
struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page; struct page *page;
if (!apic || !apic->vapic_addr) if (!apic || !apic->vapic_addr)
return; return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page; vcpu->arch.apic->vapic_page = page;
return 0;
} }
static void vapic_exit(struct kvm_vcpu *vcpu) static void vapic_exit(struct kvm_vcpu *vcpu)
@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
} }
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vapic_enter(vcpu); r = vapic_enter(vcpu);
if (r) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
return r;
}
r = 1; r = 1;
while (r > 0) { while (r > 0) {

View File

@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
*/ */
int devmem_is_allowed(unsigned long pagenr) int devmem_is_allowed(unsigned long pagenr)
{ {
if (pagenr <= 256) if (pagenr < 256)
return 1; return 1;
if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0; return 0;
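
A worked check of the off-by-one fixed here, assuming 4 KiB pages. The low 1 MiB spans 1 MiB / 4 KiB = 256 page frames, numbered 0..255; frame 256 starts at exactly 0x100000, so "pagenr <= 256" let /dev/mem map the first page beyond the legacy BIOS window:

#define EX_PAGE_SHIFT	12		/* 4 KiB pages assumed */

static int example_in_low_1mb(unsigned long pagenr)
{
	/* true for frames 0..255 only; 256 << 12 == 0x100000 */
	return (pagenr << EX_PAGE_SHIFT) < 0x100000;
}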

View File

@ -1452,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void)
pci_request_acs(); pci_request_acs();
xen_acpi_sleep_register(); xen_acpi_sleep_register();
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
} }
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */ /* PCI BIOS service won't work from a PV guest. */

View File

@ -828,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(PARAVIRT_LAZY_MMU);
} }
/* let's use dev_bus_addr to record the old mfn instead */
kmap_op->dev_bus_addr = page->index;
page->index = (unsigned long) kmap_op;
} }
spin_lock_irqsave(&m2p_override_lock, flags); spin_lock_irqsave(&m2p_override_lock, flags);
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
@ -857,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page,
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(m2p_add_override); EXPORT_SYMBOL_GPL(m2p_add_override);
int m2p_remove_override(struct page *page, bool clear_pte) int m2p_remove_override(struct page *page,
struct gnttab_map_grant_ref *kmap_op)
{ {
unsigned long flags; unsigned long flags;
unsigned long mfn; unsigned long mfn;
@ -887,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
WARN_ON(!PagePrivate(page)); WARN_ON(!PagePrivate(page));
ClearPagePrivate(page); ClearPagePrivate(page);
if (clear_pte) { set_phys_to_machine(pfn, page->index);
struct gnttab_map_grant_ref *map_op = if (kmap_op != NULL) {
(struct gnttab_map_grant_ref *) page->index;
set_phys_to_machine(pfn, map_op->dev_bus_addr);
if (!PageHighMem(page)) { if (!PageHighMem(page)) {
struct multicall_space mcs; struct multicall_space mcs;
struct gnttab_unmap_grant_ref *unmap_op; struct gnttab_unmap_grant_ref *unmap_op;
@ -902,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte)
* issued. In this case handle is going to -1 because * issued. In this case handle is going to -1 because
* it hasn't been modified yet. * it hasn't been modified yet.
*/ */
if (map_op->handle == -1) if (kmap_op->handle == -1)
xen_mc_flush(); xen_mc_flush();
/* /*
* Now if map_op->handle is negative it means that the * Now if kmap_op->handle is negative it means that the
* hypercall actually returned an error. * hypercall actually returned an error.
*/ */
if (map_op->handle == GNTST_general_error) { if (kmap_op->handle == GNTST_general_error) {
printk(KERN_WARNING "m2p_remove_override: " printk(KERN_WARNING "m2p_remove_override: "
"pfn %lx mfn %lx, failed to modify kernel mappings", "pfn %lx mfn %lx, failed to modify kernel mappings",
pfn, mfn); pfn, mfn);
@ -918,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte)
mcs = xen_mc_entry( mcs = xen_mc_entry(
sizeof(struct gnttab_unmap_grant_ref)); sizeof(struct gnttab_unmap_grant_ref));
unmap_op = mcs.args; unmap_op = mcs.args;
unmap_op->host_addr = map_op->host_addr; unmap_op->host_addr = kmap_op->host_addr;
unmap_op->handle = map_op->handle; unmap_op->handle = kmap_op->handle;
unmap_op->dev_bus_addr = 0; unmap_op->dev_bus_addr = 0;
MULTI_grant_table_op(mcs.mc, MULTI_grant_table_op(mcs.mc,
@ -930,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte)
set_pte_at(&init_mm, address, ptep, set_pte_at(&init_mm, address, ptep,
pfn_pte(pfn, PAGE_KERNEL)); pfn_pte(pfn, PAGE_KERNEL));
__flush_tlb_single(address); __flush_tlb_single(address);
map_op->host_addr = 0; kmap_op->host_addr = 0;
} }
} else }
set_phys_to_machine(pfn, page->index);
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
* somewhere in this domain, even before being added to the * somewhere in this domain, even before being added to the

View File

@ -2254,9 +2254,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
error_type = "I/O"; error_type = "I/O";
break; break;
} }
printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
error_type, req->rq_disk ? req->rq_disk->disk_name : "?", error_type, req->rq_disk ?
(unsigned long long)blk_rq_pos(req)); req->rq_disk->disk_name : "?",
(unsigned long long)blk_rq_pos(req));
} }
blk_account_io_completion(req, nr_bytes); blk_account_io_completion(req, nr_bytes);
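
A sketch of the idiom being adopted, with a hypothetical call site. printk_ratelimited() applies the kernel's default ratelimit state to this one statement, so an error path that fires for every failed request can no longer flood the log while a device is misbehaving:

#include <linux/printk.h>

static void example_report_io_error(const char *name,
				    unsigned long long sector)
{
	/* silently dropped once the burst threshold is exceeded */
	printk_ratelimited(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
			   name, sector);
}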

View File

@ -41,7 +41,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
sizeof(long long) > sizeof(long)) { sizeof(long long) > sizeof(long)) {
long pstart = start, plength = length; long pstart = start, plength = length;
if (pstart != start || plength != length if (pstart != start || plength != length
|| pstart < 0 || plength < 0) || pstart < 0 || plength < 0 || partno > 65535)
return -EINVAL; return -EINVAL;
} }

View File

@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
cryptlen += ivsize; cryptlen += ivsize;
} }
if (sg_is_last(assoc)) { if (req->assoclen && sg_is_last(assoc)) {
authenc_ahash_fn = crypto_authenc_ahash; authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2); sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
cryptlen += ivsize; cryptlen += ivsize;
} }
if (sg_is_last(assoc)) { if (req->assoclen && sg_is_last(assoc)) {
authenc_ahash_fn = crypto_authenc_ahash; authenc_ahash_fn = crypto_authenc_ahash;
sg_init_table(asg, 2); sg_init_table(asg, 2);
sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);

View File

@ -237,6 +237,16 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
} else if (result == ACPI_STATE_D3_HOT) { } else if (result == ACPI_STATE_D3_HOT) {
result = ACPI_STATE_D3; result = ACPI_STATE_D3;
} }
/*
* If we were unsure about the device parent's power state up to this
* point, the fact that the device is in D0 implies that the parent has
* to be in D0 too.
*/
if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
&& result == ACPI_STATE_D0)
device->parent->power.state = ACPI_STATE_D0;
*state = result; *state = result;
out: out:

View File

@ -107,6 +107,7 @@ struct acpi_power_resource {
/* List of devices relying on this power resource */ /* List of devices relying on this power resource */
struct acpi_power_resource_device *devices; struct acpi_power_resource_device *devices;
struct mutex devices_lock;
}; };
static struct list_head acpi_power_resource_list; static struct list_head acpi_power_resource_list;
@ -225,7 +226,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device)
static int __acpi_power_on(struct acpi_power_resource *resource) static int __acpi_power_on(struct acpi_power_resource *resource)
{ {
struct acpi_power_resource_device *device_list = resource->devices;
acpi_status status = AE_OK; acpi_status status = AE_OK;
status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL);
@ -238,19 +238,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n",
resource->name)); resource->name));
while (device_list) {
acpi_power_on_device(device_list->device);
device_list = device_list->next;
}
return 0; return 0;
} }
static int acpi_power_on(acpi_handle handle) static int acpi_power_on(acpi_handle handle)
{ {
int result = 0; int result = 0;
bool resume_device = false;
struct acpi_power_resource *resource = NULL; struct acpi_power_resource *resource = NULL;
struct acpi_power_resource_device *device_list;
result = acpi_power_get_context(handle, &resource); result = acpi_power_get_context(handle, &resource);
if (result) if (result)
@ -266,10 +262,25 @@ static int acpi_power_on(acpi_handle handle)
result = __acpi_power_on(resource); result = __acpi_power_on(resource);
if (result) if (result)
resource->ref_count--; resource->ref_count--;
else
resume_device = true;
} }
mutex_unlock(&resource->resource_lock); mutex_unlock(&resource->resource_lock);
if (!resume_device)
return result;
mutex_lock(&resource->devices_lock);
device_list = resource->devices;
while (device_list) {
acpi_power_on_device(device_list->device);
device_list = device_list->next;
}
mutex_unlock(&resource->devices_lock);
return result; return result;
} }
@ -355,7 +366,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
if (acpi_power_get_context(res_handle, &resource)) if (acpi_power_get_context(res_handle, &resource))
return; return;
mutex_lock(&resource->resource_lock); mutex_lock(&resource->devices_lock);
prev = NULL; prev = NULL;
curr = resource->devices; curr = resource->devices;
while (curr) { while (curr) {
@ -372,7 +383,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev,
prev = curr; prev = curr;
curr = curr->next; curr = curr->next;
} }
mutex_unlock(&resource->resource_lock); mutex_unlock(&resource->devices_lock);
} }
/* Unlink dev from all power resources in _PR0 */ /* Unlink dev from all power resources in _PR0 */
@ -414,10 +425,10 @@ static int __acpi_power_resource_register_device(
power_resource_device->device = powered_device; power_resource_device->device = powered_device;
mutex_lock(&resource->resource_lock); mutex_lock(&resource->devices_lock);
power_resource_device->next = resource->devices; power_resource_device->next = resource->devices;
resource->devices = power_resource_device; resource->devices = power_resource_device;
mutex_unlock(&resource->resource_lock); mutex_unlock(&resource->devices_lock);
return 0; return 0;
} }
@ -462,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle)
return ret; return ret;
no_power_resource: no_power_resource:
printk(KERN_WARNING PREFIX "Invalid Power Resource to register!"); printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!");
return -ENODEV; return -ENODEV;
} }
EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); EXPORT_SYMBOL_GPL(acpi_power_resource_register_device);
@ -721,6 +732,7 @@ static int acpi_power_add(struct acpi_device *device)
resource->device = device; resource->device = device;
mutex_init(&resource->resource_lock); mutex_init(&resource->resource_lock);
mutex_init(&resource->devices_lock);
strcpy(resource->name, device->pnp.bus_id); strcpy(resource->name, device->pnp.bus_id);
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_POWER_CLASS); strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
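
A hedged sketch of the lock split made above; all names are illustrative. The resource keeps resource_lock for its reference count and power state, while the list of dependent devices gets its own mutex, so acpi_power_on() can drop the state lock before resuming dependants instead of doing that work under it:

#include <linux/mutex.h>
#include <linux/types.h>

struct example_resource {
	struct mutex state_lock;	/* guards ref_count / on-off state */
	struct mutex devices_lock;	/* guards only the dependants list */
	int ref_count;
};

static void example_power_on(struct example_resource *res)
{
	bool resume = false;

	mutex_lock(&res->state_lock);
	if (res->ref_count++ == 0)
		resume = true;		/* we really just turned it on */
	mutex_unlock(&res->state_lock);

	if (!resume)
		return;

	mutex_lock(&res->devices_lock);
	/* ...walk the dependants list and resume each device... */
	mutex_unlock(&res->devices_lock);
}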

View File

@ -268,6 +268,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* JMicron 360/1/3/5/6, match class to avoid IDE function */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
/* JMicron 362B and 362C have an AHCI function with IDE class code */
{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
/* ATI */ /* ATI */
{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@ -393,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
{ PCI_DEVICE(0x1b4b, 0x917a), { PCI_DEVICE(0x1b4b, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(0x1b4b, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
{ PCI_DEVICE(0x1b4b, 0x91a3), { PCI_DEVICE(0x1b4b, 0x91a3),
.driver_data = board_ahci_yes_fbs }, .driver_data = board_ahci_yes_fbs },
@ -400,7 +405,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
/* Asmedia */ /* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
/* Generic, PCI class code for AHCI */ /* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,

View File

@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
return -EINVAL; return -EINVAL;
/* Sanitise input arguments */ /* Sanitise input arguments */
alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
base = ALIGN(base, alignment); base = ALIGN(base, alignment);
size = ALIGN(size, alignment); size = ALIGN(size, alignment);
limit &= ~(alignment - 1); limit &= ~(alignment - 1);
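
A worked example of the alignment change, assuming 4 KiB pages and MAX_ORDER = 11. The buddy allocator's largest block has order MAX_ORDER - 1, i.e. 2^10 pages = 4 MiB, so that is the natural CMA alignment; shifting by MAX_ORDER itself demanded 8 MiB, twice what the allocator can actually hand out:

#define EX_PAGE_SIZE	4096UL
#define EX_MAX_ORDER	11		/* default buddy configuration assumed */

static unsigned long example_cma_alignment(void)
{
	/* 4096 << 10 == 4 MiB, the largest buddy block */
	return EX_PAGE_SIZE << (EX_MAX_ORDER - 1);
}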

View File

@ -35,6 +35,7 @@ new_skb(ulong len)
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE); skb->protocol = __constant_htons(ETH_P_AOE);
skb_checksum_none_assert(skb);
} }
return skb; return skb;
} }

View File

@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
} }
break; break;
case CMD_PROTOCOL_ERR: case CMD_PROTOCOL_ERR:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, dev_warn(&h->pdev->dev,
"%p has protocol error\n", c); "%p has protocol error\n", c);
break; break;

View File

@ -1148,11 +1148,15 @@ static bool mtip_pause_ncq(struct mtip_port *port,
reply = port->rxfis + RX_FIS_D2H_REG; reply = port->rxfis + RX_FIS_D2H_REG;
task_file_data = readl(port->mmio+PORT_TFDATA); task_file_data = readl(port->mmio+PORT_TFDATA);
if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT)) if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
if ((task_file_data & 1))
return false; return false;
if (fis->command == ATA_CMD_SEC_ERASE_PREP) { if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
port->ic_pause_timer = jiffies; port->ic_pause_timer = jiffies;
return true; return true;
} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) && } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
@ -1900,7 +1904,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
int rv = 0, xfer_sz = command[3]; int rv = 0, xfer_sz = command[3];
if (xfer_sz) { if (xfer_sz) {
if (user_buffer) if (!user_buffer)
return -EFAULT; return -EFAULT;
buf = dmam_alloc_coherent(&port->dd->pdev->dev, buf = dmam_alloc_coherent(&port->dd->pdev->dev,
@ -2043,7 +2047,7 @@ static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
*timeout = 240000; /* 4 minutes */ *timeout = 240000; /* 4 minutes */
break; break;
case ATA_CMD_STANDBYNOW1: case ATA_CMD_STANDBYNOW1:
*timeout = 10000; /* 10 seconds */ *timeout = 120000; /* 2 minutes */
break; break;
case 0xF7: case 0xF7:
case 0xFA: case 0xFA:
@ -2588,9 +2592,6 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
if (!len || size) if (!len || size)
return 0; return 0;
if (size < 0)
return -EINVAL;
size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) for (n = dd->slot_groups-1; n >= 0; n--)
@ -2660,9 +2661,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
if (!len || size) if (!len || size)
return 0; return 0;
if (size < 0)
return -EINVAL;
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags); dd->port->flags);
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
@ -3214,8 +3212,8 @@ static int mtip_hw_init(struct driver_data *dd)
"Unable to check write protect progress\n"); "Unable to check write protect progress\n");
else else
dev_info(&dd->pdev->dev, dev_info(&dd->pdev->dev,
"Write protect progress: %d%% (%d blocks)\n", "Write protect progress: %u%% (%u blocks)\n",
attr242.cur, attr242.data); attr242.cur, le32_to_cpu(attr242.data));
return rv; return rv;
out3: out3:
@ -3619,6 +3617,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
bio_endio(bio, -ENODATA); bio_endio(bio, -ENODATA);
return; return;
} }
if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
bio_endio(bio, -ENODATA);
return;
}
} }
if (unlikely(!bio_has_data(bio))) { if (unlikely(!bio_has_data(bio))) {
@ -4168,7 +4170,13 @@ static void mtip_pci_shutdown(struct pci_dev *pdev)
/* Table of device ids supported by this driver. */ /* Table of device ids supported by this driver. */
static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
{ 0 } { 0 }
}; };
@ -4199,12 +4207,12 @@ static int __init mtip_init(void)
{ {
int error; int error;
printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
/* Allocate a major block device number to use with this driver. */ /* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME); error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) { if (error <= 0) {
printk(KERN_ERR "Unable to register block device (%d)\n", pr_err("Unable to register block device (%d)\n",
error); error);
return -EBUSY; return -EBUSY;
} }
@ -4213,7 +4221,7 @@ static int __init mtip_init(void)
if (!dfs_parent) { if (!dfs_parent) {
dfs_parent = debugfs_create_dir("rssd", NULL); dfs_parent = debugfs_create_dir("rssd", NULL);
if (IS_ERR_OR_NULL(dfs_parent)) { if (IS_ERR_OR_NULL(dfs_parent)) {
printk(KERN_WARNING "Error creating debugfs parent\n"); pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL; dfs_parent = NULL;
} }
} }

View File

@@ -76,7 +76,13 @@
 /* Micron Vendor ID & P320x SSD Device ID */
 #define PCI_VENDOR_ID_MICRON    0x1344
-#define P320_DEVICE_ID		0x5150
+#define P320H_DEVICE_ID	0x5150
+#define P320M_DEVICE_ID	0x5151
+#define P320S_DEVICE_ID	0x5152
+#define P325M_DEVICE_ID	0x5153
+#define P420H_DEVICE_ID	0x5160
+#define P420M_DEVICE_ID	0x5161
+#define P425M_DEVICE_ID	0x5163
 /* Driver name and version strings */
 #define MTIP_DRV_NAME		"mtip32xx"
@@ -131,10 +137,12 @@ enum {
 	MTIP_PF_SVC_THD_STOP_BIT = 8,
 	/* below are bit numbers in 'dd_flag' defined in driver_data */
+	MTIP_DDF_SEC_LOCK_BIT = 0,
 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
 	MTIP_DDF_OVER_TEMP_BIT      = 2,
 	MTIP_DDF_WRITE_PROTECT_BIT  = 3,
 	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+				(1 << MTIP_DDF_SEC_LOCK_BIT) | \
 				(1 << MTIP_DDF_OVER_TEMP_BIT) | \
 				(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
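
Note: adding MTIP_DDF_SEC_LOCK_BIT to MTIP_DDF_STOP_IO means every path that already tests the combined mask now also rejects I/O while the drive is security locked. A tiny userspace sketch of the combined-mask idiom (enum values mirror the ones above; not driver code):

#include <stdio.h>

enum {
	DDF_SEC_LOCK_BIT	= 0,
	DDF_REMOVE_PENDING_BIT	= 1,
	DDF_OVER_TEMP_BIT	= 2,
	DDF_WRITE_PROTECT_BIT	= 3,
	DDF_STOP_IO = (1 << DDF_SEC_LOCK_BIT) |
		      (1 << DDF_REMOVE_PENDING_BIT) |
		      (1 << DDF_OVER_TEMP_BIT) |
		      (1 << DDF_WRITE_PROTECT_BIT),
};

int main(void)
{
	unsigned long dd_flag = 1UL << DDF_SEC_LOCK_BIT; /* locked drive */

	if (dd_flag & DDF_STOP_IO)	/* one test covers all four states */
		printf("rejecting I/O, flags %#lx\n", dd_flag);
	return 0;
}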

View File

@@ -449,6 +449,14 @@ static void nbd_clear_que(struct nbd_device *nbd)
 		req->errors++;
 		nbd_end_request(req);
 	}
+
+	while (!list_empty(&nbd->waiting_queue)) {
+		req = list_entry(nbd->waiting_queue.next, struct request,
+				 queuelist);
+		list_del_init(&req->queuelist);
+		req->errors++;
+		nbd_end_request(req);
+	}
 }
@@ -598,6 +606,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		nbd->file = NULL;
 		nbd_clear_que(nbd);
 		BUG_ON(!list_empty(&nbd->queue_head));
+		BUG_ON(!list_empty(&nbd->waiting_queue));
 		if (file)
 			fput(file);
 		return 0;
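
Note: nbd_clear_que() previously only failed requests already on queue_head; requests still parked on waiting_queue could outlive a disconnect, which the new while loop (and the extra BUG_ON) closes. A userspace sketch of the same drain-and-fail pattern (a plain singly linked list stands in for the kernel list API; not nbd code):

#include <stdio.h>
#include <stdlib.h>

struct req { struct req *next; int id; };

static struct req *waiting;	/* stands in for nbd->waiting_queue */

static void clear_queue(void)
{
	while (waiting) {			/* while (!list_empty(...)) */
		struct req *r = waiting;
		waiting = r->next;		/* list_del_init(&req->queuelist) */
		printf("req %d failed\n", r->id); /* req->errors++; nbd_end_request() */
		free(r);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		r->id = i;
		r->next = waiting;
		waiting = r;
	}
	clear_queue();
	return 0;	/* queue provably empty here, like the BUG_ON */
}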

View File

@@ -337,7 +337,7 @@ static void xen_blkbk_unmap(struct pending_req *req)
 		invcount++;
 	}
-	ret = gnttab_unmap_refs(unmap, pages, invcount, false);
+	ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
 	BUG_ON(ret);
 }

View File

@@ -86,6 +86,7 @@ static struct usb_device_id ath3k_table[] = {
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C) },
+	{ USB_DEVICE(0x0489, 0xE036) },
 	{ }	/* Terminating entry */
 };
@@ -109,6 +110,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
 	/* Atheros AR5BBU22 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
 	{ }	/* Terminating entry */
 };

View File

@@ -52,6 +52,9 @@ static struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
+	/* Apple-specific (Broadcom) devices */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },
 	/* Broadcom SoftSailing reporting vendor specific */
 	{ USB_DEVICE(0x0a5c, 0x21e1) },
@@ -94,16 +97,14 @@ static struct usb_device_id btusb_table[] = {
 	/* Broadcom BCM20702A0 */
 	{ USB_DEVICE(0x0489, 0xe042) },
-	{ USB_DEVICE(0x0a5c, 0x21e3) },
-	{ USB_DEVICE(0x0a5c, 0x21e6) },
-	{ USB_DEVICE(0x0a5c, 0x21e8) },
-	{ USB_DEVICE(0x0a5c, 0x21f3) },
-	{ USB_DEVICE(0x0a5c, 0x21f4) },
 	{ USB_DEVICE(0x413c, 0x8197) },
 	/* Foxconn - Hon Hai */
 	{ USB_DEVICE(0x0489, 0xe033) },
+	/* Broadcom devices with vendor specific id */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
 	{ }	/* Terminating entry */
 };
@@ -141,6 +142,7 @@ static struct usb_device_id blacklist_table[] = {
 	/* Atheros AR5BBU12 with sflash firmware */
 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 },
 	/* Broadcom BCM2035 */
 	{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
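
Note: USB_VENDOR_AND_INTERFACE_INFO() matches on vendor ID plus interface class/subclass/protocol, so one entry covers every current and future Broadcom dongle exposing the vendor-specific (0xff/0x01/0x01) Bluetooth interface; that is what lets the five individual 0x0a5c product IDs above be deleted. A hypothetical table using the macro (demo_table is illustrative, not from btusb):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id demo_table[] = {
	/* any device from vendor 0x0a5c whose interface is
	 * vendor-specific class 0xff, subclass 0x01, protocol 0x01 */
	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_table);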

View File

@@ -35,7 +35,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
-#include <linux/sched.h>	/* for current / set_cpus_allowed() */
 #include <linux/io.h>
 #include <linux/delay.h>
@@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
 	return res;
 }
-/* Driver entry point to switch to the target frequency */
-static int powernowk8_target(struct cpufreq_policy *pol,
-		unsigned targfreq, unsigned relation)
+struct powernowk8_target_arg {
+	struct cpufreq_policy *pol;
+	unsigned targfreq;
+	unsigned relation;
+};
+
+static long powernowk8_target_fn(void *arg)
 {
-	cpumask_var_t oldmask;
+	struct powernowk8_target_arg *pta = arg;
+	struct cpufreq_policy *pol = pta->pol;
+	unsigned targfreq = pta->targfreq;
+	unsigned relation = pta->relation;
 	struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
 	u32 checkfid;
 	u32 checkvid;
 	unsigned int newstate;
-	int ret = -EIO;
+	int ret;
 	if (!data)
 		return -EINVAL;
@@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	checkfid = data->currfid;
 	checkvid = data->currvid;
-	/* only run on specific CPU from here on. */
-	/* This is poor form: use a workqueue or smp_call_function_single */
-	if (!alloc_cpumask_var(&oldmask, GFP_KERNEL))
-		return -ENOMEM;
-
-	cpumask_copy(oldmask, tsk_cpus_allowed(current));
-	set_cpus_allowed_ptr(current, cpumask_of(pol->cpu));
-
-	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-		goto err_out;
-	}
-
 	if (pending_bit_stuck()) {
 		printk(KERN_ERR PFX "failing targ, change pending bit set\n");
-		goto err_out;
+		return -EIO;
 	}
 	pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n",
 		pol->cpu, targfreq, pol->min, pol->max, relation);
 	if (query_current_values_with_pending_wait(data))
-		goto err_out;
+		return -EIO;
 	if (cpu_family != CPU_HW_PSTATE) {
 		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
@@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
 				targfreq, relation, &newstate))
-		goto err_out;
+		return -EIO;
 	mutex_lock(&fidvid_mutex);
@@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 		ret = transition_frequency_fidvid(data, newstate);
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
-		ret = 1;
 		mutex_unlock(&fidvid_mutex);
-		goto err_out;
+		return 1;
 	}
 	mutex_unlock(&fidvid_mutex);
@@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 			data->powernow_table[newstate].index);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
-	ret = 0;
-err_out:
-	set_cpus_allowed_ptr(current, oldmask);
-	free_cpumask_var(oldmask);
-	return ret;
+	return 0;
+}
+
+/* Driver entry point to switch to the target frequency */
+static int powernowk8_target(struct cpufreq_policy *pol,
+		unsigned targfreq, unsigned relation)
+{
+	struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq,
+					     .relation = relation };
+
+	/*
+	 * Must run on @pol->cpu.  cpufreq core is responsible for ensuring
+	 * that we're bound to the current CPU and pol->cpu stays online.
+	 */
+	if (smp_processor_id() == pol->cpu)
+		return powernowk8_target_fn(&pta);
+	else
+		return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 /* Driver entry point to verify the policy and range of frequencies */
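
Note: work_on_cpu() queues the function on the kworker bound to that CPU and waits for it, which is why the cpumask save/restore dance above can go away. A minimal, hypothetical module sketch of the same pattern (assumes CONFIG_SMP; module name and message are illustrative, not from this commit):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static long say_cpu(void *arg)
{
	pr_info("ran on CPU %d\n", raw_smp_processor_id());
	return 0;
}

static int __init wocpu_init(void)
{
	/* run say_cpu() on CPU 0 regardless of where we are now */
	return work_on_cpu(0, say_cpu, NULL);
}

static void __exit wocpu_exit(void)
{
}

module_init(wocpu_init);
module_exit(wocpu_exit);
MODULE_LICENSE("GPL");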

View File

@@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	return ret;
 }
+EXPORT_SYMBOL(gen_split_key);

View File

@@ -661,7 +661,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			flags);
 	if (unlikely(!atslave || !sg_len)) {
-		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
 		return NULL;
 	}
@@ -689,6 +689,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
+			if (unlikely(!len)) {
+				dev_dbg(chan2dev(chan),
+					"prep_slave_sg: sg(%d) data length is zero\n", i);
+				goto err;
+			}
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
 				mem_width = 0;
@@ -724,6 +729,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			mem = sg_dma_address(sg);
 			len = sg_dma_len(sg);
+			if (unlikely(!len)) {
+				dev_dbg(chan2dev(chan),
+					"prep_slave_sg: sg(%d) data length is zero\n", i);
+				goto err;
+			}
 			mem_width = 2;
 			if (unlikely(mem & 3 || len & 3))
 				mem_width = 0;
@@ -757,6 +767,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 err_desc_get:
 	dev_err(chan2dev(chan), "not enough descriptors available\n");
+err:
 	atc_desc_put(atchan, first);
 	return NULL;
 }
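
Note: the two new checks reject zero-length scatterlist entries before a descriptor is built for them, and the extra err: label reuses the existing unwind (atc_desc_put() frees whatever was linked so far). A userspace sketch of validate-in-loop with a single unwind label (build() and struct desc are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct desc { struct desc *next; };

static struct desc *build(const unsigned *len, int n)
{
	struct desc *first = NULL, *d;
	int i;

	for (i = 0; i < n; i++) {
		if (!len[i]) {	/* reject bad input before allocating */
			fprintf(stderr, "sg(%d) length is zero\n", i);
			goto err;
		}
		d = malloc(sizeof(*d));
		if (!d)
			goto err;	/* same unwind path */
		d->next = first;
		first = d;
	}
	return first;
err:
	while (first) {	/* atc_desc_put() analogue: free the partial chain */
		d = first->next;
		free(first);
		first = d;
	}
	return NULL;
}

int main(void)
{
	const unsigned ok[] = { 4, 8 }, bad[] = { 4, 0 };

	printf("%s\n", build(ok, 2) ? "built" : "failed");	/* built */
	printf("%s\n", build(bad, 2) ? "built" : "failed");	/* failed */
	return 0;
}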

View File

@@ -1567,17 +1567,19 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r)
 		goto xfer_exit;
 	}
-	/* Prefer Secure Channel */
-	if (!_manager_ns(thrd))
-		r->cfg->nonsecure = 0;
-	else
-		r->cfg->nonsecure = 1;
-
 	/* Use last settings, if not provided */
-	if (r->cfg)
+	if (r->cfg) {
+		/* Prefer Secure Channel */
+		if (!_manager_ns(thrd))
+			r->cfg->nonsecure = 0;
+		else
+			r->cfg->nonsecure = 1;
+
 		ccr = _prepare_ccr(r->cfg);
-	else
+	} else {
 		ccr = readl(regs + CC(thrd->id));
+	}
 	/* If this req doesn't have valid xfer settings */
 	if (!_is_valid(ccr)) {
@@ -2928,6 +2930,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
 	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
+	if (!pdmac->peripherals) {
+		ret = -ENOMEM;
+		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
+		goto probe_err5;
+	}
 	for (i = 0; i < num_chan; i++) {
 		pch = &pdmac->peripherals[i];
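
Note: the old code dereferenced r->cfg unconditionally to set nonsecure, then checked r->cfg for NULL one line later; moving the assignment under the existing check fixes a potential NULL dereference for requests that reuse the previous channel settings. A userspace sketch of the guarded shape (names are illustrative, not pl330 code):

#include <stdio.h>

struct cfg { int nonsecure; };

static unsigned prepare(const struct cfg *c)
{
	return c->nonsecure ? 2 : 1;
}

static unsigned pick_ccr(struct cfg *c, unsigned last, int mgr_ns)
{
	if (c) {
		c->nonsecure = mgr_ns ? 1 : 0; /* touch c only inside the guard */
		return prepare(c);
	}
	return last;	/* reuse previous settings when no cfg is given */
}

int main(void)
{
	struct cfg c = { 0 };

	printf("%u %u\n", pick_ccr(&c, 7, 1), pick_ccr(NULL, 7, 0)); /* 2 7 */
	return 0;
}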

View File

@@ -669,13 +669,18 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev)
 	}
 	info->dev = &pdev->dev;
 	info->max77693 = max77693;
-	info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic,
-					&max77693_muic_regmap_config);
-	if (IS_ERR(info->max77693->regmap_muic)) {
-		ret = PTR_ERR(info->max77693->regmap_muic);
-		dev_err(max77693->dev,
-			"failed to allocate register map: %d\n", ret);
-		goto err_regmap;
+	if (info->max77693->regmap_muic)
+		dev_dbg(&pdev->dev, "allocate register map\n");
+	else {
+		info->max77693->regmap_muic = devm_regmap_init_i2c(
+						info->max77693->muic,
+						&max77693_muic_regmap_config);
+		if (IS_ERR(info->max77693->regmap_muic)) {
+			ret = PTR_ERR(info->max77693->regmap_muic);
+			dev_err(max77693->dev,
+				"failed to allocate register map: %d\n", ret);
+			goto err_regmap;
+		}
 	}
 	platform_set_drvdata(pdev, info);
 	mutex_init(&info->mutex);
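
Note: the devm_regmap_init_i2c() variant ties the regmap's lifetime to the device, so it is freed automatically on probe failure or unbind and the error path needs no regmap_exit(). A hypothetical probe sketch of the pattern (driver name and register layout are made up for illustration):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int __devinit demo_probe(struct i2c_client *client,
				const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &demo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* nothing to unwind by hand */

	/* ... use regmap_read()/regmap_write() on map ... */
	return 0;
}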

View File

@@ -193,6 +193,9 @@ static const struct file_operations ast_fops = {
 	.mmap = ast_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.read = drm_read,
 };

View File

@@ -841,7 +841,7 @@ int ast_cursor_init(struct drm_device *dev)
 	ast->cursor_cache = obj;
 	ast->cursor_cache_gpu_addr = gpu_addr;
-	DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
+	DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr);
 	return 0;
 fail:
 	return ret;

View File

@@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = {
 	.unlocked_ioctl = drm_ioctl,
 	.mmap = cirrus_mmap,
 	.poll = drm_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.fasync = drm_fasync,
 };
 static struct drm_driver driver = {

View File

@@ -36,6 +36,6 @@ config DRM_EXYNOS_VIDI
 config DRM_EXYNOS_G2D
 	bool "Exynos DRM G2D"
-	depends on DRM_EXYNOS
+	depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
 	help
 	  Choose this option if you want to use Exynos G2D for DRM.

View File

@@ -163,6 +163,12 @@ static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
 	/* TODO */
 }
+static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+	struct vm_area_struct *vma)
+{
+	return -ENOTTY;
+}
+
 static struct dma_buf_ops exynos_dmabuf_ops = {
 	.map_dma_buf = exynos_gem_map_dma_buf,
 	.unmap_dma_buf = exynos_gem_unmap_dma_buf,
@@ -170,6 +176,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
 	.kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
 	.kunmap = exynos_gem_dmabuf_kunmap,
 	.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
+	.mmap = exynos_gem_dmabuf_mmap,
 	.release = exynos_dmabuf_release,
 };

View File

@@ -160,7 +160,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	if (!file_priv)
 		return -ENOMEM;
-	drm_prime_init_file_private(&file->prime);
 	file->driver_priv = file_priv;
 	return exynos_drm_subdrv_open(dev, file);
@@ -184,7 +183,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
 			e->base.destroy(&e->base);
 		}
 	}
-	drm_prime_destroy_file_private(&file->prime);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 	exynos_drm_subdrv_close(dev, file);
@@ -241,6 +239,9 @@ static const struct file_operations exynos_drm_driver_fops = {
 	.poll		= drm_poll,
 	.read		= drm_read,
 	.unlocked_ioctl	= drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.release	= drm_release,
 };

View File

@@ -831,11 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
 	}
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "failed to find registers\n");
-		ret = -ENOENT;
-		goto err_clk;
-	}
 	ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!ctx->regs) {

View File

@@ -129,7 +129,6 @@ struct g2d_runqueue_node {
 struct g2d_data {
 	struct device			*dev;
 	struct clk			*gate_clk;
-	struct resource			*regs_res;
 	void __iomem			*regs;
 	int				irq;
 	struct workqueue_struct		*g2d_workq;
@@ -751,7 +750,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	struct exynos_drm_subdrv *subdrv;
 	int ret;
-	g2d = kzalloc(sizeof(*g2d), GFP_KERNEL);
+	g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL);
 	if (!g2d) {
 		dev_err(dev, "failed to allocate driver data\n");
 		return -ENOMEM;
@@ -759,10 +758,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
 			sizeof(struct g2d_runqueue_node), 0, 0, NULL);
-	if (!g2d->runqueue_slab) {
-		ret = -ENOMEM;
-		goto err_free_mem;
-	}
+	if (!g2d->runqueue_slab)
+		return -ENOMEM;
 	g2d->dev = dev;
@@ -794,38 +791,26 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	pm_runtime_enable(dev);
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(dev, "failed to get I/O memory\n");
-		ret = -ENOENT;
-		goto err_put_clk;
-	}
-
-	g2d->regs_res = request_mem_region(res->start, resource_size(res),
-					   dev_name(dev));
-	if (!g2d->regs_res) {
-		dev_err(dev, "failed to request I/O memory\n");
-		ret = -ENOENT;
-		goto err_put_clk;
-	}
-
-	g2d->regs = ioremap(res->start, resource_size(res));
+	g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
 	if (!g2d->regs) {
 		dev_err(dev, "failed to remap I/O memory\n");
 		ret = -ENXIO;
-		goto err_release_res;
+		goto err_put_clk;
 	}
 	g2d->irq = platform_get_irq(pdev, 0);
 	if (g2d->irq < 0) {
 		dev_err(dev, "failed to get irq\n");
 		ret = g2d->irq;
-		goto err_unmap_base;
+		goto err_put_clk;
 	}
-	ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+	ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+			"drm_g2d", g2d);
 	if (ret < 0) {
 		dev_err(dev, "irq request failed\n");
-		goto err_unmap_base;
+		goto err_put_clk;
 	}
 	platform_set_drvdata(pdev, g2d);
@@ -838,7 +823,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	ret = exynos_drm_subdrv_register(subdrv);
 	if (ret < 0) {
 		dev_err(dev, "failed to register drm g2d device\n");
-		goto err_free_irq;
+		goto err_put_clk;
 	}
 	dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
@@ -846,13 +831,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	return 0;
-err_free_irq:
-	free_irq(g2d->irq, g2d);
-err_unmap_base:
-	iounmap(g2d->regs);
-err_release_res:
-	release_resource(g2d->regs_res);
-	kfree(g2d->regs_res);
 err_put_clk:
 	pm_runtime_disable(dev);
 	clk_put(g2d->gate_clk);
@@ -862,8 +840,6 @@ err_destroy_workqueue:
 	destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
 	kmem_cache_destroy(g2d->runqueue_slab);
-err_free_mem:
-	kfree(g2d);
 	return ret;
 }
@@ -873,24 +849,18 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 	cancel_work_sync(&g2d->runqueue_work);
 	exynos_drm_subdrv_unregister(&g2d->subdrv);
-	free_irq(g2d->irq, g2d);
 	while (g2d->runqueue_node) {
 		g2d_free_runqueue_node(g2d, g2d->runqueue_node);
 		g2d->runqueue_node = g2d_get_runqueue_node(g2d);
 	}
-	iounmap(g2d->regs);
-	release_resource(g2d->regs_res);
-	kfree(g2d->regs_res);
-
 	pm_runtime_disable(&pdev->dev);
 	clk_put(g2d->gate_clk);
 	g2d_fini_cmdlist(g2d);
 	destroy_workqueue(g2d->g2d_workq);
 	kmem_cache_destroy(g2d->runqueue_slab);
-	kfree(g2d);
 	return 0;
 }
@@ -924,7 +894,7 @@ static int g2d_resume(struct device *dev)
 }
 #endif
-SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
 struct platform_driver g2d_driver = {
 	.probe		= g2d_probe,
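
Note: this conversion leans on the devm_* family: the allocation, the MMIO mapping, and the IRQ are all released automatically on probe failure or driver unbind, which is why the err_free_irq/err_unmap_base/err_release_res labels and most of g2d_remove()'s teardown disappear (and why the explicit !res check can go too -- devm_request_and_ioremap() validates a NULL resource itself). A hypothetical probe skeleton showing the shape (names are illustrative, not from this driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int __devinit demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;
	int irq, ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_request_and_ioremap(&pdev->dev, res);	/* NULL res handled */
	if (!regs)
		return -ENXIO;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, demo_irq, 0, "demo", NULL);
	if (ret < 0)
		return ret;

	/* no unwind labels: devm releases everything if we fail later */
	return 0;
}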

Some files were not shown because too many files have changed in this diff.