
Linux 3.9-rc5

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.19 (GNU/Linux)
 
 iQEcBAABAgAGBQJRWLTrAAoJEHm+PkMAQRiGe8oH/iMy48mecVWvxVZn74Tx3Cef
 xmW/PnAIj28EhSPqK49N/Ow6AfQToFKf7AP0ge20KAf5teTq95AY+tH74DAANt8F
 BjKXXTZiR5xwBvRkq7CR5wDcCvEcBAAz8fgTEd6SEDB2d2VXFf5eKdKUqt1avTCh
 Z6Hup5kuwX+ddtwY2DCBXtp2n6fL0Rm5yLzY1A3OOBye1E7VyLTF7M5BR603Q44P
 4kRLxn8+R7jy3hTuZIhAeoS8TKUoBwVk7DmKxEzrhTHZVOmvwE9lEHybRnIyOpd/
 k1JnbRbiPsLsCVFOn10SQkGDAIk00lro3tuWP2C1ljERiD/OOh5Ui9nXYAhMkbI=
 =q15K
 -----END PGP SIGNATURE-----

Merge tag 'v3.9-rc5' into next/cleanup

This is a dependency for the mxs/cleanup branch.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>

commit 44c0d23775
Author: Arnd Bergmann <arnd@arndb.de>
Date:   2013-04-09 15:29:20 +02:00

555 changed files with 6220 additions and 3116 deletions


@ -1510,6 +1510,14 @@ D: Natsemi ethernet
D: Cobalt Networks (x86) support
D: This-and-That
N: Mark M. Hoffman
E: mhoffman@lightlink.com
D: asb100, lm93 and smsc47b397 hardware monitoring drivers
D: hwmon subsystem core
D: hwmon subsystem maintainer
D: i2c-sis96x and i2c-stub SMBus drivers
S: USA
N: Dirk Hohndel
E: hohndel@suse.de
D: The XFree86[tm] Project


@ -23,7 +23,7 @@ Supported chips:
Datasheet: Publicly available at the Maxim website
http://www.maxim-ic.com/
* Microchip (TelCom) TCN75
Prefix: 'lm75'
Prefix: 'tcn75'
Addresses scanned: none
Datasheet: Publicly available at the Microchip website
http://www.microchip.com/


@ -5,7 +5,7 @@ Supported adapters:
Documentation:
http://www.diolan.com/i2c/u2c12.html
Author: Guenter Roeck <guenter.roeck@ericsson.com>
Author: Guenter Roeck <linux@roeck-us.net>
Description
-----------


@ -15,6 +15,13 @@ amemthresh - INTEGER
enabled and the variable is automatically set to 2, otherwise
the strategy is disabled and the variable is set to 1.
backup_only - BOOLEAN
0 - disabled (default)
not 0 - enabled
If set, disable the director function while the server is
in backup mode to avoid packet loops for DR/TUN methods.
conntrack - BOOLEAN
0 - disabled (default)
not 0 - enabled
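
The backup_only switch documented above is an ordinary IPVS sysctl, so it can be flipped at runtime. A minimal user-space sketch, assuming the entry is exposed at the usual IPVS location /proc/sys/net/ipv4/vs/backup_only (the same effect as running sysctl net.ipv4.vs.backup_only=1):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path: IPVS sysctls normally live under net/ipv4/vs/ */
	int fd = open("/proc/sys/net/ipv4/vs/backup_only", O_WRONLY);

	if (fd < 0) {
		perror("open");		/* kernel without this knob, or IPVS not loaded */
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)	/* "not 0 - enabled" */
		perror("write");
	close(fd);
	return 0;
}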


@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
models depending on the codec chip. The list of available models
is found in HD-Audio-Models.txt
The model name "genric" is treated as a special case. When this
The model name "generic" is treated as a special case. When this
model is given, the driver uses the generic codec parser without
"codec-patch". It's sometimes good for testing and debugging.


@ -285,7 +285,7 @@ sample data.
<H4>
7.2.4 Close Callback</H4>
The <TT>close</TT> callback is called when this device is closed by the
applicaion. If any private data was allocated in open callback, it must
application. If any private data was allocated in open callback, it must
be released in the close callback. The deletion of ALSA port should be
done here, too. This callback must not be NULL.
<H4>
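
A minimal sketch of such a close callback, following the rule above: free whatever the open callback allocated and delete the ALSA port created there. The snd_seq_oss_arg argument, its private_data field, and the snd_seq_event_port_detach() helper are assumed from the usual OSS-emulation driver API; treat the exact names and signatures as illustrative, not authoritative.

#include <linux/slab.h>
#include <sound/seq_kernel.h>
#include <sound/seq_oss.h>

/* private state created by the corresponding open callback (illustrative) */
struct my_synth {
	int seq_client;
	int seq_port;
};

static int my_synth_close(struct snd_seq_oss_arg *arg)
{
	struct my_synth *synth = arg->private_data;	/* set up in open */

	if (synth) {
		/* delete the ALSA port created in open (assumed helper) */
		snd_seq_event_port_detach(synth->seq_client, synth->seq_port);
		kfree(synth);				/* release the private data */
		arg->private_data = NULL;
	}
	return 0;					/* callback itself must not be NULL */
}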


@ -1338,12 +1338,6 @@ S: Maintained
F: drivers/platform/x86/asus*.c
F: drivers/platform/x86/eeepc*.c
ASUS ASB100 HARDWARE MONITOR DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: drivers/hwmon/asb100.c
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
M: Dan Williams <djbw@fb.com>
W: http://sourceforge.net/projects/xscaleiop
@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c
F: drivers/dma/at_hdmac_regs.h
F: include/linux/platform_data/dma-atmel.h
ATMEL I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91.c
ATMEL ISI DRIVER
M: Josh Wu <josh.wu@atmel.com>
L: linux-media@vger.kernel.org
@ -2629,7 +2629,7 @@ F: include/uapi/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
L: intel-gfx@lists.freedesktop.org (subscribers-only)
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/
F: drivers/base/firmware*.c
F: include/linux/firmware.h
FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
FLOPPY DRIVER
M: Jiri Kosina <jkosina@suse.cz>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@ -3851,7 +3857,7 @@ F: drivers/i2c/busses/i2c-ismt.c
F: Documentation/i2c/busses/i2c-ismt
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
@ -5647,6 +5653,14 @@ S: Maintained
F: drivers/video/riva/
F: drivers/video/nvidia/
NVM EXPRESS DRIVER
M: Matthew Wilcox <willy@linux.intel.com>
L: linux-nvme@lists.infradead.org
T: git git://git.infradead.org/users/willy/linux-nvme.git
S: Supported
F: drivers/block/nvme.c
F: include/linux/nvme.h
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@ -5675,7 +5689,7 @@ S: Maintained
F: arch/arm/*omap*/*clock*
OMAP POWER MANAGEMENT SUPPORT
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/*omap*/*pm*
@ -5769,7 +5783,7 @@ F: arch/arm/*omap*/usb*
OMAP GPIO DRIVER
M: Santosh Shilimkar <santosh.shilimkar@ti.com>
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-omap.c
@ -6201,7 +6215,7 @@ F: include/linux/power_supply.h
F: drivers/power/
PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
M: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/
@ -6543,12 +6557,6 @@ S: Maintained
F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
RANDOM NUMBER DRIVER
M: Theodore Ts'o" <tytso@mit.edu>
S: Maintained
@ -7165,7 +7173,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
M: Kevin Hilman <khilman@ti.com>
M: Kevin Hilman <khilman@deeprootsystems.com>
L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
T: git git://gitorious.org/linux-davinci/linux-davinci.git
Q: http://patchwork.kernel.org/project/linux-davinci/list/
@ -7198,13 +7206,6 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/sis/sis900.*
SIS 96X I2C/SMBUS DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-sis96x
F: drivers/i2c/busses/i2c-sis96x.c
SIS FRAMEBUFFER DRIVER
M: Thomas Winischhofer <thomas@winischhofer.net>
W: http://www.winischhofer.net/linuxsisvga.shtml
@ -7282,7 +7283,7 @@ F: Documentation/hwmon/sch5627
F: drivers/hwmon/sch5627.c
SMSC47B397 HARDWARE MONITOR DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
M: Jean Delvare <khali@linux-fr.org>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/smsc47b397
@ -7705,9 +7706,10 @@ F: include/linux/swiotlb.h
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
L: linux-snps-arc@vger.kernel.org
S: Supported
F: arch/arc/
F: Documentation/devicetree/bindings/arc/
F: drivers/tty/serial/arc-uart.c
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*


@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;


@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
*/
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif


@ -415,7 +415,7 @@
*-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION marker
st \marker, [sp, 8]
st \marker, [sp, 8] /* orig_r8 */
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */


@ -13,7 +13,7 @@
#ifdef CONFIG_KGDB
#include <asm/user.h>
#include <asm/ptrace.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
@ -53,9 +53,7 @@ enum arc700_linux_regnums {
};
#else
static inline void kgdb_trap(struct pt_regs *regs, int param)
{
}
#define kgdb_trap(regs, param)
#endif
#endif /* __ARC_KGDB_H__ */


@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
#define orig_r8_IS_EXCPN 0x0004
#define orig_r8_IS_EXCPN 0x0008
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020


@ -16,8 +16,6 @@
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
int sys_fork_wrapper(void);
int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);


@ -28,14 +28,14 @@
*/
struct user_regs_struct {
struct scratch {
struct {
long pad;
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
struct callee {
struct {
long pad;
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;


@ -452,7 +452,7 @@ tracesys:
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR]
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
;################### Special Sys Call Wrappers ##########################
; TBD: call do_fork directly from here
ARC_ENTRY sys_fork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_fork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_fork_wrapper
ARC_ENTRY sys_vfork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_vfork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_vfork_wrapper
ARC_ENTRY sys_clone_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_clone


@ -9,6 +9,7 @@
*/
#include <linux/kgdb.h>
#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>


@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "\n");
#ifdef _ASM_GENERIC_UNISTD_H
n += scnprintf(buf + n, len - n,
"OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
#endif
"OS ABI [v3]\t: no-legacy-syscalls\n");
return buf;
}


@ -6,8 +6,6 @@
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
#define sys_fork sys_fork_wrapper
#define sys_vfork sys_vfork_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),


@ -49,7 +49,6 @@ config ARM
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
select VIRT_TO_BUS
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@ -743,6 +742,7 @@ config ARCH_RPC
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
select NO_IOPORT
select VIRT_TO_BUS
help
On the Acorn Risc-PC, Linux can support the internal IDE disk and
CD-ROM interface, serial and parallel port, and the floppy drive.
@ -878,6 +878,7 @@ config ARCH_SHARK
select ISA_DMA
select NEED_MACH_MEMORY_H
select PCI
select VIRT_TO_BUS
select ZONE_DMA
help
Support for the StrongARM based Digital DNARD machine, also known
@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5
bool
config ARCH_MULTI_V6
bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
bool "ARMv6 based platforms (ARM11)"
select ARCH_MULTI_V6_V7
select CPU_V6
config ARCH_MULTI_V7
bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
default y
select ARCH_MULTI_V6_V7
select ARCH_VEXPRESS
@ -1461,10 +1462,6 @@ config ISA_DMA
bool
select ISA_DMA_API
config ARCH_NO_VIRT_TO_BUS
def_bool y
depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
# Select ISA DMA interface
config ISA_DMA_API
bool


@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
DEBUG_IMX53_UART || \
DEBUG_IMX6Q_UART
default 1
depends on ARCH_MXC
help
Choose UART port on which kernel low-level debug messages
should be output.


@ -238,8 +238,32 @@
nand {
pinctrl_nand: nand-0 {
atmel,pins =
<3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */
3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */
<3 0 0x1 0x0 /* PD0 periph A Read Enable */
3 1 0x1 0x0 /* PD1 periph A Write Enable */
3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
};
pinctrl_nand_16bits: nand_16bits-0 {
atmel,pins =
<3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
};
};


@ -275,18 +275,27 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x12680000 0x1000>;
interrupts = <0 35 0>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
pdma1: pdma@12690000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12690000 0x1000>;
interrupts = <0 36 0>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
mdma1: mdma@12850000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x12850000 0x1000>;
interrupts = <0 34 0>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <1>;
};
};
};


@ -142,12 +142,18 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x120000 0x1000>;
interrupts = <0 34 0>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
pdma1: pdma@121B0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0x121000 0x1000>;
interrupts = <0 35 0>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
};
};


@ -387,7 +387,7 @@
spi@7000d800 {
compatible = "nvidia,tegra20-slink";
reg = <0x7000d480 0x200>;
reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;


@ -374,7 +374,7 @@
spi@7000d800 {
compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
reg = <0x7000d480 0x200>;
reg = <0x7000d800 0x200>;
interrupts = <0 83 0x04>;
nvidia,dma-request-selector = <&apbdma 17>;
#address-cells = <1>;


@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
evt->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY;
evt->rating = 400;
evt->rating = 100;
evt->mult = 1;
evt->set_mode = broadcast_timer_set_mode;


@ -14,31 +14,15 @@
.text
.align 5
.word 0
1: subs r2, r2, #4 @ 1 do we have enough
blt 5f @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [ip], #1 @ 1
strleb r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
/*
* The pointer is now aligned and the length is adjusted. Try doing the
* memset again.
*/
ENTRY(memset)
/*
* Preserve the contents of r0 for the return value.
*/
mov ip, r0
ands r3, ip, #3 @ 1 unaligned?
bne 1b @ 1
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
bne 6f @ 1
/*
* we know that the pointer in ip is aligned to a word boundary.
*/
orr r1, r1, r1, lsl #8
1: orr r1, r1, r1, lsl #8
orr r1, r1, r1, lsl #16
mov r3, r1
cmp r2, #16
@ -127,4 +111,13 @@ ENTRY(memset)
tst r2, #1
strneb r1, [ip], #1
mov pc, lr
6: subs r2, r2, #4 @ 1 do we have enough
blt 5b @ 1 bytes to align with?
cmp r3, #2 @ 1
strltb r1, [ip], #1 @ 1
strleb r1, [ip], #1 @ 1
strb r1, [ip], #1 @ 1
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)


@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
extern void at91_gpio_suspend(void);
extern void at91_gpio_resume(void);
#ifdef CONFIG_PINCTRL_AT91
extern void at91_pinctrl_gpio_suspend(void);
extern void at91_pinctrl_gpio_resume(void);
#else
static inline void at91_pinctrl_gpio_suspend(void) {}
static inline void at91_pinctrl_gpio_resume(void) {}
#endif
#endif /* __ASSEMBLY__ */
#endif


@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
void at91_irq_suspend(void)
{
int i = 0, bit;
int bit = -1;
if (has_aic5()) {
/* disable enabled irqs */
while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
i = bit;
}
/* enable wakeup irqs */
i = 0;
while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
bit = -1;
while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *backups);
@ -118,23 +116,21 @@ void at91_irq_suspend(void)
void at91_irq_resume(void)
{
int i = 0, bit;
int bit = -1;
if (has_aic5()) {
/* disable wakeup irqs */
while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IDCR, 1);
i = bit;
}
/* enable irqs disabled for suspend */
i = 0;
while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
bit = -1;
while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
at91_aic_write(AT91_AIC5_SSR,
bit & AT91_AIC5_INTSEL_MSK);
at91_aic_write(AT91_AIC5_IECR, 1);
i = bit;
}
} else {
at91_aic_write(AT91_AIC_IDCR, *wakeups);


@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
static int at91_pm_enter(suspend_state_t state)
{
at91_gpio_suspend();
if (of_have_populated_dt())
at91_pinctrl_gpio_suspend();
else
at91_gpio_suspend();
at91_irq_suspend();
pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
error:
target_state = PM_SUSPEND_ON;
at91_irq_resume();
at91_gpio_resume();
if (of_have_populated_dt())
at91_pinctrl_gpio_resume();
else
at91_gpio_resume();
return 0;
}


@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
*/
int edma_alloc_slot(unsigned ctlr, int slot)
{
if (!edma_cc[ctlr])
return -EINVAL;
if (slot >= 0)
slot = EDMA_CHAN_SLOT(slot);


@ -67,6 +67,7 @@ config ARCH_NETWINDER
select ISA
select ISA_DMA
select PCI
select VIRT_TO_BUS
help
Say Y here if you intend to run this kernel on the Rebel.COM
NetWinder. Information about this machine can be found at:


@ -264,6 +264,7 @@ int __init mx35_clocks_init(void)
clk_prepare_enable(clk[gpio3_gate]);
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
clk_prepare_enable(clk[max_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code


@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
NULL
};
static void __init imx25_timer_init(void)
{
mx25_clocks_init_dt();
}
DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.map_io = mx25_map_io,
.init_early = imx25_init_early,


@ -9,6 +9,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach/arch.h>


@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
.lower_margin = 4,
.hsync_len = 1,
.vsync_len = 1,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
.lower_margin = 10,
.hsync_len = 10,
.vsync_len = 10,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
},
};
@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
.lower_margin = 45,
.hsync_len = 1,
.vsync_len = 1,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
},
};
@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
.lower_margin = 13,
.hsync_len = 48,
.vsync_len = 3,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
.lower_margin = 0x15,
.hsync_len = 64,
.vsync_len = 4,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
FB_SYNC_DATA_ENABLE_HIGH_ACT |
FB_SYNC_DOTCLK_FAILING_ACT,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
},
};
@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
.lower_margin = 2,
.hsync_len = 15,
.vsync_len = 15,
.sync = FB_SYNC_DATA_ENABLE_HIGH_ACT
},
};
@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static inline void enable_clk_enet_out(void)
@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
}
@ -297,6 +291,7 @@ static void __init m28evk_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init sc_sps1_init(void)
@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
#define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0)
@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
mxsfb_pdata.default_bpp = 32;
mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
}
static void __init cfa10037_init(void)
@ -423,6 +421,8 @@ static void __init apf28_init(void)
mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
mxsfb_pdata.default_bpp = 16;
mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
MXSFB_SYNC_DOTCLK_FAILING_ACT;
}
static void __init mxs_machine_init(void)


@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
.name = "pcmcdclk",
};
static struct clk dummy_apb_pclk = {
.name = "apb_pclk",
.id = -1,
};
static struct clk *clkset_vpllsrc_list[] = {
[0] = &clk_fin_vpll,
[1] = &clk_sclk_hdmi27m,
@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
static struct clk init_clocks_off[] = {
{
.name = "dma",
.devname = "dma-pl330.0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 3),
}, {
.name = "dma",
.devname = "dma-pl330.1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 4),
}, {
.name = "rot",
.parent = &clk_hclk_dsys.clk,
.enable = s5pv210_clk_ip0_ctrl,
@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
.ctrlbit = (1<<19),
};
static struct clk clk_pdma0 = {
.name = "pdma0",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 3),
};
static struct clk clk_pdma1 = {
.name = "pdma1",
.parent = &clk_hclk_psys.clk,
.enable = s5pv210_clk_ip0_ctrl,
.ctrlbit = (1 << 4),
};
static struct clk *clkset_uart_list[] = {
[6] = &clk_mout_mpll.clk,
[7] = &clk_mout_epll.clk,
@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
&clk_hsmmc1,
&clk_hsmmc2,
&clk_hsmmc3,
&clk_pdma0,
&clk_pdma1,
};
/* Clock initialisation code */
@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
};
void __init s5pv210_register_clocks(void)
@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
s3c_disable_clocks(clk_cdev[ptr], 1);
s3c24xx_register_clock(&dummy_apb_pclk);
s3c_pwmclk_init();
}


@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
.mux_id = 0,
.flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
V4L2_MBUS_VSYNC_ACTIVE_LOW,
.bus_type = FIMC_BUS_TYPE_ITU_601,
.fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
.board_info = &noon010pc30_board_info,
.i2c_bus_num = 0,
.clk_frequency = 16000000UL,


@ -32,6 +32,7 @@
#include <linux/smsc911x.h>
#include <linux/spi/spi.h>
#include <linux/spi/sh_hspi.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/usb/otg.h>


@ -576,7 +576,7 @@ load_ind:
/* x = ((*(frame + k)) & 0xf) << 2; */
ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
/* the interpreter should deal with the negative K */
if (k < 0)
if ((int)k < 0)
return -1;
/* offset in r1: we might have to take the slow path */
emit_mov_i(r_off, k, ctx);
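
The one-character change above matters because k is carried as an unsigned 32-bit value: an unsigned compare against zero can never be true, so the "negative K" case the comment mentions was silently skipped until k is reinterpreted as signed. A small stand-alone illustration of the difference (plain C, not JIT code):

#include <stdio.h>

int main(void)
{
	unsigned int k = (unsigned int)-12;	/* a "negative" offset stored in a u32 */

	if ((int)k < 0)				/* the fixed test: true */
		printf("(int)k < 0        -> negative K handled\n");
	if (k < (unsigned int)0)		/* the old test, spelled honestly: never true */
		printf("k < 0 (unsigned)  -> never printed\n");
	return 0;
}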


@ -9,7 +9,6 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select GENERIC_CLOCKEVENTS
select GENERIC_HARDIRQS_NO_DEPRECATED
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW


@ -6,17 +6,6 @@ config FRAME_POINTER
bool
default y
config DEBUG_ERRORS
bool "Verbose kernel error messages"
depends on DEBUG_KERNEL
help
This option controls verbose debugging information which can be
printed when the kernel detects an internal error. This debugging
information is useful to kernel hackers when tracking down problems,
but mostly meaningless to other people. It's safe to say Y unless
you are concerned with the code size or don't want to see these
messages.
config DEBUG_STACK_USAGE
bool "Enable stack utilization instrumentation"
depends on DEBUG_KERNEL


@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_DEBUG_ERRORS=y


@ -22,7 +22,7 @@ struct ucontext {
stack_t uc_stack;
sigset_t uc_sigmask;
/* glibc uses a 1024-bit sigset_t */
__u8 __unused[(1024 - sizeof(sigset_t)) / 8];
__u8 __unused[1024 / 8 - sizeof(sigset_t)];
/* last for future expansion */
struct sigcontext uc_mcontext;
};
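
The padding fix above is unit arithmetic: 1024 is a count of bits while sizeof() yields bytes, so the old expression subtracted bytes from bits before dividing. A small check, under the assumption that the kernel-side sigset_t is a single 64-bit word (_NSIG == 64), shows why only the new form fills exactly the 128 bytes glibc reserves for the mask:

#include <stdint.h>

/* assumption: kernel sigset_t is one 64-bit word */
typedef struct { uint64_t sig[1]; } ksigset_t;

#define PAD_OLD ((1024 - sizeof(ksigset_t)) / 8)   /* (1024 - 8) / 8 = 127 bytes */
#define PAD_NEW (1024 / 8 - sizeof(ksigset_t))     /* 128 - 8      = 120 bytes */

_Static_assert(sizeof(ksigset_t) + PAD_NEW == 128, "mask + pad fills glibc's 1024 bits");
_Static_assert(sizeof(ksigset_t) + PAD_OLD == 135, "old formula overshoots by 7 bytes");

int main(void) { return 0; }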


@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
/* bitops */
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
/* physical memory */
EXPORT_SYMBOL(memstart_addr);


@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct compat_rt_sigframe __user *frame;
compat_stack_t stack;
int err = 0;
frame = compat_get_sigframe(ka, regs, sizeof(*frame));


@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
unsigned long size, mask;
bool page64k = IS_ENABLED(ARM64_64K_PAGES);
bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;


@ -291,7 +291,6 @@ cpu_idle (void)
}
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
min_xtp();
#endif
@ -299,9 +298,7 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
if (!idle)
idle = default_idle;
(*idle)();
default_idle();
if (mark_idle)
(*mark_idle)(0);
#ifdef CONFIG_SMP


@ -90,6 +90,7 @@ config GENERIC_GPIO
config PPC
bool
default y
select BINFMT_ELF
select OF
select OF_EARLY_FLATTREE
select HAVE_FTRACE_MCOUNT_RECORD


@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
/*
* VSID allocation (256MB segment)
*
* We first generate a 38-bit "proto-VSID". For kernel addresses this
* is equal to the ESID | 1 << 37, for user addresses it is:
* (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
* We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
* from mmu context id and effective segment id of the address.
*
* This splits the proto-VSID into the below range
* 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
* 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
*
* We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
* That is, we assign half of the space to user processes and half
* to the kernel.
* For user processes max context id is limited to ((1ul << 19) - 5)
* for kernel space, we use the top 4 context ids to map address as below
* NOTE: each context only support 64TB now.
* 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
* 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
* 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
* 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
* VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without
* a divide or extra multiply (see below).
* a divide or extra multiply (see below). The scramble function gives
* robust scattering in the hash table (at least based on some initial
* results).
*
* This scheme has several advantages over older methods:
* We also consider VSID 0 special. We use VSID 0 for slb entries mapping
* bad address. This enables us to consolidate bad address handling in
* hash_page.
*
* - We have VSIDs allocated for every kernel address
* (i.e. everything above 0xC000000000000000), except the very top
* segment, which simplifies several things.
*
* - We allow for USER_ESID_BITS significant bits of ESID and
* CONTEXT_BITS bits of context for user addresses.
* i.e. 64T (46 bits) of address space for up to half a million contexts.
*
* - The scramble function gives robust scattering in the hash
* table (at least based on some initial results). The previous
* method was more susceptible to pathological cases giving excessive
* hash collisions.
* We also need to avoid the last segment of the last context, because that
* would give a protovsid of 0x1fffffffff. That will result in a VSID 0
* because of the modulo operation in vsid scramble. But the vmemmap
* (which is what uses region 0xf) will never be close to 64TB in size
* (it's 56 bytes per page of system memory).
*/
#define CONTEXT_BITS 19
#define ESID_BITS 18
#define ESID_BITS_1T 6
/*
* 256MB segment
* The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
* available for user + kernel mapping. The top 4 contexts are used for
* kernel mapping. Each segment contains 2^28 bytes. Each
* context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
* (19 == 37 + 28 - 46).
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
/*
* This should be computed such that protovosid * vsid_mulitplier
* doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
*/
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_256M 38
#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_1T 26
#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
#define CONTEXT_BITS 19
#define USER_ESID_BITS 18
#define USER_ESID_BITS_1T 6
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/*
* This macro generates asm code to compute the VSID scramble
@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \
/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
/* NOTE: explanation based on VSID_BITS_##size = 36 \
* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \
@ -513,34 +521,6 @@ typedef struct {
})
#endif /* 1 */
/*
* This is only valid for addresses >= PAGE_OFFSET
* The proto-VSID space is divided into two class
* User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
* kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
*
* With KERNEL_START at 0xc000000000000000, the proto vsid for
* the kernel ends up with 0xc00000000 (36 bits). With 64TB
* support we need to have kernel proto-VSID in the
* [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
unsigned long proto_vsid;
/*
* We need to make sure proto_vsid for the kernel is
* >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
*/
if (ssize == MMU_SEGSIZE_256M) {
proto_vsid = ea >> SID_SHIFT;
proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
return vsid_scramble(proto_vsid, 256M);
}
proto_vsid = ea >> SID_SHIFT_1T;
proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
return vsid_scramble(proto_vsid, 1T);
}
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
return MMU_SEGSIZE_256M;
}
/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize)
{
/*
* Bad address. We return VSID 0 for that
*/
if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
return 0;
if (ssize == MMU_SEGSIZE_256M)
return vsid_scramble((context << USER_ESID_BITS)
return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M);
return vsid_scramble((context << USER_ESID_BITS_1T)
return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T);
}
/*
* This is only valid for addresses >= PAGE_OFFSET
*
* For kernel space, we use the top 4 context ids to map address as below
* 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
* 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
* 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
* 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
unsigned long context;
/*
* kernel take the top 4 context from the available range
*/
context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
return get_vsid(context, ea, ssize);
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
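
The scramble described in the comments above is a multiplicative hash reduced modulo 2^VSID_BITS - 1, which is why no divide is needed: for a modulus of the form 2^n - 1, x mod (2^n - 1) can be computed by folding the high bits onto the low bits, exactly what ASM_VSID_SCRAMBLE does with srdi/clrldi/add. A user-space sketch of the 256M case, using the constants from this header (VSID_BITS_256M = CONTEXT_BITS + ESID_BITS = 37) and the first kernel context id 0x7fffc as an example input; illustrative only:

#include <stdint.h>
#include <stdio.h>

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define VSID_MULTIPLIER_256M	12538073UL			/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)	/* 37 */
#define VSID_MODULUS_256M	((1UL << VSID_BITS_256M) - 1)

static uint64_t vsid_scramble_256M(uint64_t protovsid)
{
	uint64_t x = protovsid * VSID_MULTIPLIER_256M;

	/* x mod (2^37 - 1): fold the high bits onto the low bits, then fix up once */
	x = (x >> VSID_BITS_256M) + (x & VSID_MODULUS_256M);
	if (x >= VSID_MODULUS_256M)
		x -= VSID_MODULUS_256M;
	return x;
}

int main(void)
{
	uint64_t context = 0x7fffc;	/* kernel context for the 0xc... region, per the comment */
	uint64_t esid = 0;		/* ESID of address 0xc000000000000000 */
	uint64_t proto = (context << ESID_BITS) | esid;

	printf("vsid = 0x%llx\n", (unsigned long long)vsid_scramble_256M(proto));
	return 0;
}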


@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.mmu_features = MMU_FTR_HPTE_TABLE,
.mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,


@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
#endif
bool epapr_paravirt_enabled;
@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
#endif
}
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
#endif
epapr_paravirt_enabled = true;


@ -1066,78 +1066,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
.align 7
.globl alignment_common
alignment_common:
@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler)
#endif /* CONFIG_PPC_POWERNV */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
/*
* Hash table stuff
*/
@ -1452,20 +1452,36 @@ do_ste_alloc:
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
mfspr r11,SPRN_DAR /* ea */
/*
* check for bad kernel/user address
* (ea & ~REGION_MASK) >= PGTABLE_RANGE
*/
rldicr. r9,r11,4,(63 - 46 - 4)
li r9,0 /* VSID = 0 for bad address */
bne- 0f
/*
* Calculate VSID:
* This is the kernel vsid, we take the top for context from
* the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* Here we know that (ea >> 60) == 0xc
*/
lis r9,(MAX_USER_CONTEXT + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT + 1)@l
srdi r10,r11,SID_SHIFT
rldimi r10,r9,ESID_BITS,0 /* proto vsid */
ASM_VSID_SCRAMBLE(r10, r9, 256M)
rldic r9,r10,12,16 /* r9 = vsid << 12 */
0:
/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
mfspr r11,SPRN_DAR
srdi r11,r11,28
srdi r11,r11,SID_SHIFT
rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
/* This is a kernel address, so protovsid = ESID | 1 << 37 */
li r9,0x1
rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80


@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
{
}
#else
static void __reloc_toc(void *tocstart, unsigned long offset,
unsigned long nr_entries)
static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
{
unsigned long i;
unsigned long *toc_entry = (unsigned long *)tocstart;
unsigned long *toc_entry;
/* Get the start of the TOC by using r2 directly. */
asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
for (i = 0; i < nr_entries; i++) {
*toc_entry = *toc_entry + offset;
@ -2850,8 +2852,7 @@ static void reloc_toc(void)
unsigned long nr_entries =
(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
/* Need to add offset to get at __prom_init_toc_start */
__reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
__reloc_toc(offset, nr_entries);
mb();
}
@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
mb();
/* __prom_init_toc_start has been relocated, no need to add offset */
__reloc_toc(__prom_init_toc_start, -offset, nr_entries);
__reloc_toc(-offset, nr_entries);
}
#endif
#endif


@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
brk.address = bp_info->addr & ~7UL;
brk.type = HW_BRK_TYPE_TRANSLATE;
brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)


@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu3s->context_id[0] = err;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
<< USER_ESID_BITS) - 1;
vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
<< ESID_BITS) - 1;
vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);


@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
/*
* If we hit a bad address return error.
*/
if (!vsid)
return -1;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
@ -759,6 +764,8 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize();
else
stab_initialize(get_paca()->stab_real);
}
#ifdef CONFIG_SMP
@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
DBG_LOW(" out of pgtable range !\n");
return 1;
}
/* Get region & vsid */
switch (REGION_ID(ea)) {
case USER_REGION_ID:
@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
}
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
/* Bad address. */
if (!vsid) {
DBG_LOW("Bad address!\n");
return 1;
}
/* Get pgdir */
pgdir = mm->pgd;
if (pgdir == NULL)
@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
if (!vsid)
return;
/* Hash doesn't like irqs */
local_irq_save(flags);
@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Don't create HPTE entries for bad address */
if (!vsid)
return;
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);


@ -29,15 +29,6 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
/*
* 256MB segment
* The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
* available for user mappings. Each segment contains 2^28 bytes. Each
* context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
* (19 == 37 + 28 - 46).
*/
#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
int __init_new_context(void)
{
int index;
@ -56,7 +47,7 @@ again:
else if (err)
return err;
if (index > MAX_CONTEXT) {
if (index > MAX_USER_CONTEXT) {
spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock);


@ -61,7 +61,7 @@
#endif
#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif


@ -31,10 +31,15 @@
* No other registers are examined or changed.
*/
_GLOBAL(slb_allocate_realmode)
/* r3 = faulting address */
/*
* check for bad kernel/user address
* (ea & ~REGION_MASK) >= PGTABLE_RANGE
*/
rldicr. r9,r3,4,(63 - 46 - 4)
bne- 8f
srdi r9,r3,60 /* get region */
srdi r10,r3,28 /* get esid */
srdi r10,r3,SID_SHIFT /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
li r9,0x1
/*
* for 1T we shift 12 bits more. slb_finish_load_1T will do
* the necessary adjustment
* context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* r9 = region id.
*/
rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
li r9,0x1
/*
* for 1T we shift 12 bits more. slb_finish_load_1T will do
* the necessary adjustment
* context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* r9 = region id.
*/
rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
0: /* user address: proto-VSID = context << 15 | ESID. First check
* if the address is within the boundaries of the user region
*/
srdi. r9,r10,USER_ESID_BITS
bne- 8f /* invalid ea bits set */
0:
/* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs
* array.
@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
cmpldi r10,0x1000
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldimi r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
8: /* invalid EA */
li r10,0 /* BAD_VSID */
li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load
@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
/* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0
/* fall through slb_finish_load */
#endif /* __DISABLED__ */
@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
/*
* Finish loading of an SLB entry and return
*
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
* r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/
slb_finish_load:
rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M)
/*
* bits above VSID_BITS_256M need to be ignored from r10
@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
/*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
*
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
* r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/
slb_finish_load_1T:
srdi r10,r10,40-28 /* get 1T ESID */
srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T)
/*
* bits above VSID_BITS_1T need to be ignored from r10


@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize);
WARN_ON(vsid == 0);
} else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);


@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
.attrs = power7_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-19");
static struct attribute *power7_pmu_format_attr[] = {
&format_attr_event.attr,
NULL,
};
struct attribute_group power7_pmu_format_group = {
.name = "format",
.attrs = power7_pmu_format_attr,
};
static const struct attribute_group *power7_pmu_attr_groups[] = {
&power7_pmu_format_group,
&power7_pmu_events_group,
NULL,
};


@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
return IRQ_HANDLED;
};
static int __devinit gpio_halt_probe(struct platform_device *pdev)
static int gpio_halt_probe(struct platform_device *pdev)
{
enum of_gpio_flags flags;
struct device_node *node = pdev->dev.of_node;
@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
return 0;
}
static int __devexit gpio_halt_remove(struct platform_device *pdev)
static int gpio_halt_remove(struct platform_device *pdev)
{
if (halt_node) {
int gpio = of_get_gpio(halt_node, 0);
@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
.of_match_table = gpio_halt_match,
},
.probe = gpio_halt_probe,
.remove = __devexit_p(gpio_halt_remove),
.remove = gpio_halt_remove,
};
module_platform_driver(gpio_halt_driver);


@ -124,9 +124,8 @@ config 6xx
select PPC_HAVE_PMU_SUPPORT
config POWER3
bool
depends on PPC64 && PPC_BOOK3S
default y if !POWER4_ONLY
def_bool y
config POWER4
depends on PPC64 && PPC_BOOK3S
@ -145,8 +144,7 @@ config TUNE_CELL
but somewhat slower on other machines. This option only changes
the scheduling of instructions, not the selection of instructions
itself, so the resulting kernel will keep running on all other
machines. When building a kernel that is supposed to run only
on Cell, you should also select the POWER4_ONLY option.
machines.
# this is temp to handle compat with arch=ppc
config 8xx

View File

@ -34,6 +34,8 @@ struct arsb {
u32 reserved[4];
} __packed;
#define EQC_WR_PROHIBIT 22
struct msb {
u8 fmt:4;
u8 oc:4;
@ -96,11 +98,13 @@ struct scm_device {
#define OP_STATE_TEMP_ERR 2
#define OP_STATE_PERM_ERR 3
enum scm_event {SCM_CHANGE, SCM_AVAIL};
struct scm_driver {
struct device_driver drv;
int (*probe) (struct scm_device *scmdev);
int (*remove) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev);
void (*notify) (struct scm_device *scmdev, enum scm_event event);
void (*handler) (struct scm_device *scmdev, void *data, int error);
};


@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
if (unlikely(cpumask_empty(mm_cpumask(mm))))
return;
/*
* If the machine has IDTE we prefer to do a per mm flush
* on all cpus instead of doing a local flush if the mm


@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
mcck_skip:
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
stm %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
stm %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
l %r1,BASED(.Ldo_machine_check)


@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
LAST_BREAK %r14
mcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA
mvc __PT_R0(128,%r11),0(%r14)
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs


@ -571,6 +571,8 @@ static void __init setup_memory_end(void)
/* Split remaining virtual space between 1:1 mapping & vmemmap array */
tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
tmp = SECTION_ALIGN_UP(tmp);
tmp = VMALLOC_START - tmp * sizeof(struct page);
tmp &= ~((vmax >> 11) - 1); /* align to page table level */
tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
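
The arithmetic in the hunk above splits the remaining virtual space so that every mapped page pays for both its 1:1 mapping (PAGE_SIZE bytes) and its struct page in the vmemmap; the added SECTION_ALIGN_UP rounds the page count up to a whole section before sizing the vmemmap. A toy recomputation under assumed constants (4 KiB pages, a 64-byte struct page, 2^16 pages per section, and an example split point), only to show the shape of the calculation:

#include <stdio.h>

#define PAGE_SIZE		4096UL		/* assumed */
#define STRUCT_PAGE_SIZE	64UL		/* assumed sizeof(struct page) */
#define PAGES_PER_SECTION	(1UL << 16)	/* assumed section size */
#define SECTION_ALIGN_UP(x)	(((x) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
	unsigned long vmalloc_start = 1UL << 42;	/* example split point */

	/* each page costs PAGE_SIZE of 1:1 map plus one struct page of vmemmap */
	unsigned long pages = vmalloc_start / (PAGE_SIZE + STRUCT_PAGE_SIZE);

	pages = SECTION_ALIGN_UP(pages);		/* the fix: a whole number of sections */
	printf("1:1 mapping ends at %#lx (%lu struct pages of vmemmap follow)\n",
	       vmalloc_start - pages * STRUCT_PAGE_SIZE, pages);
	return 0;
}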


@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
default "arch/sparc/configs/sparc32_defconfig" if SPARC32
default "arch/sparc/configs/sparc64_defconfig" if SPARC64
# CONFIG_BITS can be used at source level to get 32/64 bits
config BITS
int
default 32 if SPARC32
default 64 if SPARC64
config IOMMU_HELPER
bool
default y if SPARC64
@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
config GENERIC_HWEIGHT
bool
default y if !ULTRA_HAS_POPULATION_COUNT
default y
config GENERIC_CALIBRATE_DELAY
bool


@ -45,6 +45,7 @@
#define SUN4V_CHIP_NIAGARA3 0x03
#define SUN4V_CHIP_NIAGARA4 0x04
#define SUN4V_CHIP_NIAGARA5 0x05
#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__


@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
sparc_pmu_type = "niagara5";
break;
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
sparc_pmu_type = "sparc64-x";
break;
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);


@ -134,6 +134,8 @@ prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
prom_sparc_prefix:
.asciz "SPARC-"
prom_sparc64x_prefix:
.asciz "SPARC64-X"
.align 4
prom_root_compatible:
.skip 64
@ -412,7 +414,7 @@ sun4v_chip_type:
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
bne,pn %xcc, 4f
bne,pn %xcc, 49f
nop
70: ldub [%g1 + 7], %g2
@ -425,7 +427,7 @@ sun4v_chip_type:
cmp %g2, '5'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA5, %g4
ba,pt %xcc, 4f
ba,pt %xcc, 49f
nop
91: sethi %hi(prom_cpu_compatible), %g1
@ -439,6 +441,25 @@ sun4v_chip_type:
mov SUN4V_CHIP_NIAGARA2, %g4
4:
/* Athena */
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_sparc64x_prefix), %g7
or %g7, %lo(prom_sparc64x_prefix), %g7
mov 9, %g3
41: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 49f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 41b
add %g1, 1, %g1
mov SUN4V_CHIP_SPARC64X, %g4
ba,pt %xcc, 5f
nop
49:
mov SUN4V_CHIP_UNKNOWN, %g4
5: sethi %hi(sun4v_chip_type), %g2
or %g2, %lo(sun4v_chip_type), %g2


@ -186,6 +186,8 @@ struct grpci2_cap_first {
#define CAP9_IOMAP_OFS 0x20
#define CAP9_BARSIZE_OFS 0x24
#define TGT 256
struct grpci2_priv {
struct leon_pci_info info; /* must be on top of this structure */
struct grpci2_regs *regs;
@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
if (where & 0x3)
return -EINVAL;
if (bus == 0 && PCI_SLOT(devfn) != 0)
devfn += (0x8 * 6);
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
if (where & 0x3)
return -EINVAL;
if (bus == 0 && PCI_SLOT(devfn) != 0)
devfn += (0x8 * 6);
if (bus == 0) {
devfn += (0x8 * 6); /* start at AD16=Device0 */
} else if (bus == TGT) {
bus = 0;
devfn = 0; /* special case: bridge controller itself */
}
/* Select bus */
spin_lock_irqsave(&grpci2_dev_lock, flags);
@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
unsigned int busno = bus->number;
int ret;
if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
if (PCI_SLOT(devfn) > 15 || busno > 255) {
*val = ~0;
return 0;
}
@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
struct grpci2_priv *priv = grpci2priv;
unsigned int busno = bus->number;
if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
if (PCI_SLOT(devfn) > 15 || busno > 255)
return 0;
#ifdef GRPCI2_DEBUG_CFGACCESS
@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
REGSTORE(regs->ahbmst_map[i], priv->pci_area);
/* Get the GRPCI2 Host PCI ID */
grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
/* Get address to first (always defined) capability structure */
grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
/* Enable/Disable Byte twisting */
grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
/* Setup the Host's PCI Target BARs for other peripherals to access,
* and do DMA to the host's memory. The target BARs can be sized and
@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
pciadr = 0;
}
}
grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
bar_sz);
grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
i, pciadr, ahbadr);
}
/* set as bus master and enable pci memory responses */
grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
/* Enable Error response (CPU-TRAP) on illegal memory access. */
REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);


@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y


@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
CONFIG_MD_RAID1=m
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=m
CONFIG_MULTICORE_RAID456=y
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_DEBUG=y


@ -77,6 +77,7 @@ struct arch_specific_insn {
* a post_handler or break_handler).
*/
int boostable;
bool if_modifier;
};
struct arch_optimized_insn {


@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
gpa_t time;
struct pvclock_vcpu_time_info hv_clock;
unsigned int hw_tsc_khz;
unsigned int time_offset;
struct page *time_page;
struct gfn_to_hva_cache pv_time;
bool pv_time_enabled;
/* set guest stopped flag in pvclock flags field */
bool pvclock_set_guest_stopped_request;


@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
extern int __must_check xen_physdev_op_compat(int, void *);
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
if (unlikely(rc == -ENOSYS))
rc = HYPERVISOR_physdev_op_compat(cmd, arg);
rc = xen_physdev_op_compat(cmd, arg);
return rc;
}


@ -44,6 +44,7 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e


@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */


@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
else
p->ainsn.boostable = -1;
/* Check whether the instruction modifies Interrupt Flag or not */
p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
/* Also, displacement change doesn't affect the first byte */
p->opcode = p->ainsn.insn[0];
}
@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
if (is_IF_modifier(p->ainsn.insn))
if (p->ainsn.if_modifier)
kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}


@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
struct microcode_intel ***mc_saved;
mc_saved = (struct microcode_intel ***)
__pa_symbol(&mc_saved_data->mc_saved);
__pa_nodebug(&mc_saved_data->mc_saved);
for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
struct microcode_intel *p;
p = *(struct microcode_intel **)
__pa(mc_saved_data->mc_saved + i);
mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
__pa_nodebug(mc_saved_data->mc_saved + i);
mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
}
}
#endif
@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
struct cpio_data cd;
long offset = 0;
#ifdef CONFIG_X86_32
char *p = (char *)__pa_symbol(ucode_name);
char *p = (char *)__pa_nodebug(ucode_name);
#else
char *p = ucode_name;
#endif
@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
if (mc_intel == NULL)
return;
delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
*delay_ucode_info_p = 1;
*current_mc_date_p = mc_intel->hdr.date;
@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
}
#endif
static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
struct ucode_cpu_info *uci)
static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
struct ucode_cpu_info *uci)
{
struct microcode_intel *mc_intel;
unsigned int val[2];
@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
#ifdef CONFIG_X86_32
struct boot_params *boot_params_p;
boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
ramdisk_image = boot_params_p->hdr.ramdisk_image;
ramdisk_size = boot_params_p->hdr.ramdisk_size;
initrd_start_early = ramdisk_image;
initrd_end_early = initrd_start_early + ramdisk_size;
_load_ucode_intel_bsp(
(struct mc_saved_data *)__pa_symbol(&mc_saved_data),
(unsigned long *)__pa_symbol(&mc_saved_in_initrd),
(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
initrd_start_early, initrd_end_early, &uci);
#else
ramdisk_image = boot_params.hdr.ramdisk_image;
@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
unsigned long *initrd_start_p;
mc_saved_in_initrd_p =
(unsigned long *)__pa_symbol(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
#else
mc_saved_data_p = &mc_saved_data;
mc_saved_in_initrd_p = mc_saved_in_initrd;


@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info *guest_hv_clock;
struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
if (!vcpu->time_page)
if (!vcpu->pv_time_enabled)
return 0;
/*
@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
shared_kaddr = kmap_atomic(vcpu->time_page);
guest_hv_clock = shared_kaddr + vcpu->time_offset;
if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
kunmap_atomic(shared_kaddr);
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
return 0;
}
@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.time_page) {
kvm_release_page_dirty(vcpu->arch.time_page);
vcpu->arch.time_page = NULL;
}
vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
gpa_offset = data & ~(PAGE_MASK | 1);
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
/* Check that the address is 32-byte aligned. */
if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
break;
}
@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
if (!vcpu->arch.time_page)
if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);


@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
char c;
unsigned zero_len;
for (; len; --len) {
for (; len; --len, to++) {
if (__get_user_nocheck(c, from++, sizeof(char)))
break;
if (__put_user_nocheck(c, to++, sizeof(char)))
if (__put_user_nocheck(c, to, sizeof(char)))
break;
}


@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
__xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
#endif
@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
#endif
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_mark_init_mm_pinned();


@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
*error_sector = bio->bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;


@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)


@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
return rc;
data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len > sizeof(*gdata)) {
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;


@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
static void handle_root_bridge_removal(struct acpi_device *device)
{
acpi_status status;
struct acpi_eject_event *ej_event;
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
acpi_bus_hot_remove_device(ej_event);
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
if (ACPI_FAILURE(status))
kfree(ej_event);
}
static void _handle_hotplug_event_root(struct work_struct *work)
@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
handle = hp_work->handle;
type = hp_work->type;
root = acpi_pci_find_root(handle);
acpi_scan_lock_acquire();
root = acpi_pci_find_root(handle);
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
break;
}
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
kfree(buffer.pointer);
}


@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB17FX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),


@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
EXPORT_SYMBOL(tegra_ahb_enable_smmu);
#endif
#ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_PM
static int tegra_ahb_suspend(struct device *dev)
{
int i;


@ -59,15 +59,16 @@ config ATA_ACPI
option libata.noacpi=1
config SATA_ZPODD
bool "SATA Zero Power ODD Support"
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
depends on ATA_ACPI
default n
help
This option adds support for SATA ZPODD. It requires both
ODD and the platform support, and if enabled, will automatically
power on/off the ODD when certain condition is satisfied. This
does not impact user's experience of the ODD, only power is saved
when ODD is not in use(i.e. no disc inside).
This option adds support for SATA Zero Power Optical Disc
Drive (ZPODD). It requires both the ODD and the platform
support, and if enabled, will automatically power on/off the
ODD when certain condition is satisfied. This does not impact
end user's experience of the ODD, only power is saved when
the ODD is not in use (i.e. no disc inside).
If unsure, say N.
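As a rough usage sketch (illustrative only, not part of this diff; the dependency chain beyond the "depends on ATA_ACPI" line above is an assumption and the fragment is not exhaustive), a kernel configuration enabling the new option would contain:

  CONFIG_ATA=y
  CONFIG_ATA_ACPI=y
  CONFIG_SATA_ZPODD=y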


@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */


@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
static int prefer_ms_hyperv = 1;
module_param(prefer_ms_hyperv, int, 0);
MODULE_PARM_DESC(prefer_ms_hyperv,
"Prefer Hyper-V paravirtualization drivers instead of ATA, "
"0 - Use ATA drivers, "
"1 (Default) - Use the paravirtualization drivers.");
static void piix_ignore_devices_quirk(struct ata_host *host)
{


@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
handle = ata_dev_acpi_handle(dev);
if (handle)
acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
}
static void ata_acpi_unregister_power_resource(struct ata_device *dev)


@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = {
},
};
static int __init pata_s3c_init(void)
{
return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
}
static void __exit pata_s3c_exit(void)
{
platform_driver_unregister(&pata_s3c_driver);
}
module_init(pata_s3c_init);
module_exit(pata_s3c_exit);
module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");


@ -1511,8 +1511,7 @@ error_exit_with_cleanup:
if (hcr_base)
iounmap(hcr_base);
if (host_priv)
kfree(host_priv);
kfree(host_priv);
return retval;
}


@ -532,11 +532,11 @@ config BLK_DEV_RBD
If unsure, say N.
config BLK_DEV_RSXX
tristate "RamSam PCIe Flash SSD Device Driver"
tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
depends on PCI
help
Device driver for IBM's high speed PCIe SSD
storage devices: RamSan-70 and RamSan-80.
storage devices: FlashSystem-70 and FlashSystem-80.
To compile this driver as a module, choose M here: the
module will be called rsxx.
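A minimal illustrative selection for this driver (a sketch only, not part of this diff) would be:

  CONFIG_PCI=y
  CONFIG_BLK_DEV_RSXX=m

after which the module loads as rsxx, as the help text above notes.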

Some files were not shown because too many files have changed in this diff.