
Merge branch 'x86/urgent' into irq/sparseirq

Reason: Pull in the latest io_apic bugfixes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Thomas Gleixner 2010-10-12 16:41:22 +02:00
commit 7c5f13519a
249 changed files with 2046 additions and 1093 deletions

View File

@ -1220,7 +1220,7 @@ F: drivers/auxdisplay/
F: include/linux/cfag12864b.h
AVR32 ARCHITECTURE
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
W: http://www.atmel.com/products/AVR32/
W: http://avr32linux.org/
W: http://avrfreaks.net/
@ -1228,7 +1228,7 @@ S: Supported
F: arch/avr32/
AVR32/AT32AP MACHINE SUPPORT
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
S: Supported
F: arch/avr32/mach-at32ap/
@ -2199,6 +2199,12 @@ W: http://acpi4asus.sf.net
S: Maintained
F: drivers/platform/x86/eeepc-laptop.c
EFIFB FRAMEBUFFER DRIVER
L: linux-fbdev@vger.kernel.org
M: Peter Jones <pjones@redhat.com>
S: Maintained
F: drivers/video/efifb.c
EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
@ -2662,6 +2668,8 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
L: lm-sensors@lm-sensors.org
W: http://www.lm-sensors.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
S: Maintained
F: Documentation/hwmon/
F: drivers/hwmon/
@ -3905,10 +3913,8 @@ F: Documentation/serial/moxa-smartio
F: drivers/char/mxser.*
MSI LAPTOP SUPPORT
M: Lennart Poettering <mzxreary@0pointer.de>
M: Lee, Chun-Yi <jlee@novell.com>
L: platform-driver-x86@vger.kernel.org
W: https://tango.0pointer.de/mailman/listinfo/s270-linux
W: http://0pointer.de/lennart/tchibo.html
S: Maintained
F: drivers/platform/x86/msi-laptop.c
@ -3925,8 +3931,10 @@ S: Supported
F: drivers/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
S: Orphan
M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
S: Maintained
F: drivers/mmc/
F: include/linux/mmc/
@ -5097,8 +5105,10 @@ S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER
S: Orphan
M: Chris Ball <cjb@laptop.org>
L: linux-mmc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc.git
S: Maintained
F: drivers/mmc/host/sdhci.*
SECURE DIGITAL HOST CONTROLLER INTERFACE, OPEN FIRMWARE BINDINGS (SDHCI-OF)

View File

@ -32,8 +32,9 @@ config HAVE_OPROFILE
config KPROBES
bool "Kprobes"
depends on KALLSYMS && MODULES
depends on MODULES
depends on HAVE_KPROBES
select KALLSYMS
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes
@ -45,7 +46,6 @@ config OPTPROBES
def_bool y
depends on KPROBES && HAVE_OPTPROBES
depends on !PREEMPT
select KALLSYMS_ALL
config HAVE_EFFICIENT_UNALIGNED_ACCESS
bool
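
Editorial aside, not part of this commit: the register_kprobe() flow described in the Kprobes help text above looks roughly like the minimal module below. This is a hedged sketch; "do_fork" is only a placeholder symbol that existed in kernels of this vintage.

#include <linux/kprobes.h>
#include <linux/module.h>

/* Sketch of the API the help text describes; probed symbol is a placeholder. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %p\n", p->addr);
	return 0;	/* let execution continue normally */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init kp_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");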

View File

@ -73,8 +73,6 @@
ldq $20, HAE_REG($19); \
stq $21, HAE_CACHE($19); \
stq $21, 0($20); \
ldq $0, 0($sp); \
ldq $1, 8($sp); \
99:; \
ldq $19, 72($sp); \
ldq $20, 80($sp); \
@ -316,7 +314,7 @@ ret_from_sys_call:
cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
ldq $0, SP_OFF($sp)
and $0, 8, $0
beq $0, restore_all
beq $0, ret_to_kernel
ret_to_user:
/* Make sure need_resched and sigpending don't change between
sampling and the rti. */
@ -329,6 +327,11 @@ restore_all:
RESTORE_ALL
call_pal PAL_rti
ret_to_kernel:
lda $16, 7
call_pal PAL_swpipl
br restore_all
.align 3
$syscall_error:
/*
@ -657,7 +660,7 @@ kernel_thread:
/* We don't actually care for a3 success widgetry in the kernel.
Not for positive errno values. */
stq $0, 0($sp) /* $0 */
br restore_all
br ret_to_kernel
.end kernel_thread
/*
@ -911,15 +914,6 @@ sys_execve:
jmp $31, do_sys_execve
.end sys_execve
.align 4
.globl osf_sigprocmask
.ent osf_sigprocmask
osf_sigprocmask:
.prologue 0
mov $sp, $18
jmp $31, sys_osf_sigprocmask
.end osf_sigprocmask
.align 4
.globl alpha_ni_syscall
.ent alpha_ni_syscall

View File

@ -356,7 +356,7 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
dest[27] = pt->r27;
dest[28] = pt->r28;
dest[29] = pt->gp;
dest[30] = rdusp();
dest[30] = ti == current_thread_info() ? rdusp() : ti->pcb.usp;
dest[31] = pt->pc;
/* Once upon a time this was the PS value. Which is stupid

View File

@ -41,46 +41,20 @@ static void do_signal(struct pt_regs *, struct switch_stack *,
/*
* The OSF/1 sigprocmask calling sequence is different from the
* C sigprocmask() sequence..
*
* how:
* 1 - SIG_BLOCK
* 2 - SIG_UNBLOCK
* 3 - SIG_SETMASK
*
* We change the range to -1 .. 1 in order to let gcc easily
* use the conditional move instructions.
*
* Note that we don't need to acquire the kernel lock for SMP
* operation, as all of this is local to this thread.
*/
SYSCALL_DEFINE3(osf_sigprocmask, int, how, unsigned long, newmask,
struct pt_regs *, regs)
SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask)
{
unsigned long oldmask = -EINVAL;
sigset_t oldmask;
sigset_t mask;
unsigned long res;
if ((unsigned long)how-1 <= 2) {
long sign = how-2; /* -1 .. 1 */
unsigned long block, unblock;
newmask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
oldmask = current->blocked.sig[0];
unblock = oldmask & ~newmask;
block = oldmask | newmask;
if (!sign)
block = unblock;
if (sign <= 0)
newmask = block;
if (_NSIG_WORDS > 1 && sign > 0)
sigemptyset(&current->blocked);
current->blocked.sig[0] = newmask;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->r0 = 0; /* special no error return */
siginitset(&mask, newmask & ~_BLOCKABLE);
res = sigprocmask(how, &mask, &oldmask);
if (!res) {
force_successful_syscall_return();
res = oldmask.sig[0];
}
return oldmask;
return res;
}
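/* Editorial note on the rewrite above: passing `how` straight through to
 * sigprocmask() relies on alpha's OSF/1-compatible ABI, which to the best
 * of my reading defines SIG_BLOCK as 1, SIG_UNBLOCK as 2 and SIG_SETMASK
 * as 3: exactly the numbering the deleted comment listed, so no
 * translation step is needed. */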
SYSCALL_DEFINE3(osf_sigaction, int, sig,
@ -94,9 +68,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags))
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
new_ka.ka_restorer = NULL;
}
@ -106,9 +80,9 @@ SYSCALL_DEFINE3(osf_sigaction, int, sig,
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags))
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;

View File

@ -58,7 +58,7 @@ sys_call_table:
.quad sys_open /* 45 */
.quad alpha_ni_syscall
.quad sys_getxgid
.quad osf_sigprocmask
.quad sys_osf_sigprocmask
.quad alpha_ni_syscall
.quad alpha_ni_syscall /* 50 */
.quad sys_acct

View File

@ -271,7 +271,6 @@ config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
select ARCH_USES_GETTIMEOFFSET
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
@ -1051,6 +1050,32 @@ config ARM_ERRATA_460075
ACTLR register. Note that setting specific bits in the ACTLR register
may not be available in non-secure mode.
config ARM_ERRATA_742230
bool "ARM errata: DMB operation may be faulty"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 742230 Cortex-A9
(r1p0..r2p2) erratum. Under rare circumstances, a DMB instruction
between two write operations may not ensure the correct visibility
ordering of the two writes. This workaround sets a specific bit in
the diagnostic register of the Cortex-A9 which causes the DMB
instruction to behave as a DSB, ensuring the correct behaviour of
the two writes.
config ARM_ERRATA_742231
bool "ARM errata: Incorrect hazard handling in the SCU may lead to data corruption"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 742231 Cortex-A9
(r2p0..r2p2) erratum. Under certain conditions, specific to the
Cortex-A9 MPCore micro-architecture, two CPUs working in SMP mode,
accessing some data located in the same cache line, may get corrupted
data due to bad handling of the address hazard when the line gets
replaced from one of the CPUs at the same time as another CPU is
accessing it. This workaround sets specific bits in the diagnostic
register of the Cortex-A9 which reduces the linefill issuing
capabilities of the processor.
config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4

View File

@ -116,5 +116,5 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile .config
$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
@sed "$(SEDFLAGS)" < $< > $@

View File

@ -271,6 +271,14 @@ int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= PHYS_OFFSET + SZ_64M - 1)
return 0;
return -EIO;
}
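/*
 * Editorial illustration, not part of the diff: the new override above is
 * what a driver's usual mask negotiation lands in, e.g.
 *
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		dev_warn(&pdev->dev, "no usable coherent DMA mask\n");
 *
 * A mask covering the IT8152's 64MB window (anything at or above
 * PHYS_OFFSET + SZ_64M - 1) succeeds; narrower masks now fail with -EIO.
 */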
int __init it8152_pci_setup(int nr, struct pci_sys_data *sys)
{
it8152_io.start = IT8152_IO_BASE + 0x12000;

View File

@ -317,6 +317,10 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)

View File

@ -48,6 +48,8 @@ work_pending:
beq no_work_pending
mov r0, sp @ 'regs'
mov r2, why @ 'syscall'
tst r1, #_TIF_SIGPENDING @ delivering a signal?
movne why, #0 @ prevent further restarts
bl do_notify_resume
b ret_slow_syscall @ Check work again

View File

@ -426,7 +426,7 @@ static struct i2c_gpio_platform_data pdata_i2c0 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PA21,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi0_device = {
@ -440,7 +440,7 @@ static struct i2c_gpio_platform_data pdata_i2c1 = {
.sda_is_open_drain = 1,
.scl_pin = AT91_PIN_PB11,
.scl_is_open_drain = 1,
.udelay = 2, /* ~100 kHz */
.udelay = 5, /* ~100 kHz */
};
static struct platform_device at91sam9g45_twi1_device = {

View File

@ -769,8 +769,7 @@ static struct map_desc dm355_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};

View File

@ -969,8 +969,7 @@ static struct map_desc dm365_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};

View File

@ -653,8 +653,7 @@ static struct map_desc dm644x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00008000),
.length = SZ_16K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};

View File

@ -737,8 +737,7 @@ static struct map_desc dm646x_io_desc[] = {
.virtual = SRAM_VIRT,
.pfn = __phys_to_pfn(0x00010000),
.length = SZ_32K,
/* MT_MEMORY_NONCACHED requires supersection alignment */
.type = MT_DEVICE,
.type = MT_MEMORY_NONCACHED,
},
};

View File

@ -13,8 +13,8 @@
#define IO_SPACE_LIMIT 0xffffffff
#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_PHYS_BASE) +\
DOVE_PCIE0_IO_VIRT_BASE))
#define __mem_pci(a) (a)
#define __io(a) ((void __iomem *)(((a) - DOVE_PCIE0_IO_BUS_BASE) + \
DOVE_PCIE0_IO_VIRT_BASE))
#define __mem_pci(a) (a)
#endif

View File

@ -503,6 +503,14 @@ struct pci_bus * __devinit ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
}
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
if (mask >= SZ_64M - 1)
return 0;
return -EIO;
}
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);

View File

@ -26,6 +26,8 @@
#define PCIBIOS_MAX_MEM 0x4BFFFFFF
#endif
#define ARCH_HAS_DMA_SET_COHERENT_MASK
#define pcibios_assign_all_busses() 1
/* Register locations and bits */

View File

@ -38,7 +38,7 @@
#define KIRKWOOD_PCIE1_IO_PHYS_BASE 0xf3000000
#define KIRKWOOD_PCIE1_IO_VIRT_BASE 0xfef00000
#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00000000
#define KIRKWOOD_PCIE1_IO_BUS_BASE 0x00100000
#define KIRKWOOD_PCIE1_IO_SIZE SZ_1M
#define KIRKWOOD_PCIE_IO_PHYS_BASE 0xf2000000

View File

@ -117,7 +117,7 @@ static void __init pcie0_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 0 I/O Space";
pp->res[0].start = KIRKWOOD_PCIE_IO_PHYS_BASE;
pp->res[0].start = KIRKWOOD_PCIE_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;
@ -139,7 +139,7 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
* IORESOURCE_IO
*/
pp->res[0].name = "PCIe 1 I/O Space";
pp->res[0].start = KIRKWOOD_PCIE1_IO_PHYS_BASE;
pp->res[0].start = KIRKWOOD_PCIE1_IO_BUS_BASE;
pp->res[0].end = pp->res[0].start + KIRKWOOD_PCIE1_IO_SIZE - 1;
pp->res[0].flags = IORESOURCE_IO;

View File

@ -9,6 +9,8 @@
#ifndef __ASM_MACH_SYSTEM_H
#define __ASM_MACH_SYSTEM_H
#include <mach/cputype.h>
static inline void arch_idle(void)
{
cpu_do_idle();
@ -16,6 +18,9 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
cpu_reset(0);
if (cpu_is_pxa168())
cpu_reset(0xffff0000);
else
cpu_reset(0);
}
#endif /* __ASM_MACH_SYSTEM_H */

View File

@ -312,8 +312,7 @@ static int pxa_set_target(struct cpufreq_policy *policy,
freqs.cpu = policy->cpu;
if (freq_debug)
pr_debug(KERN_INFO "Changing CPU frequency to %d Mhz, "
"(SDRAM %d Mhz)\n",
pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
freqs.new / 1000, (pxa_freq_settings[idx].div2) ?
(new_freq_mem / 2000) : (new_freq_mem / 1000));

View File

@ -264,23 +264,35 @@
* <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x
* == 0x3 for pxa300/pxa310/pxa320
*/
#if defined(CONFIG_PXA25x) || defined(CONFIG_PXA27x)
#define __cpu_is_pxa2xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id <= 0x2; \
})
#else
#define __cpu_is_pxa2xx(id) (0)
#endif
#ifdef CONFIG_PXA3xx
#define __cpu_is_pxa3xx(id) \
({ \
unsigned int _id = (id) >> 13 & 0x7; \
_id == 0x3; \
})
#else
#define __cpu_is_pxa3xx(id) (0)
#endif
#if defined(CONFIG_CPU_PXA930) || defined(CONFIG_CPU_PXA935)
#define __cpu_is_pxa93x(id) \
({ \
unsigned int _id = (id) >> 4 & 0xfff; \
_id == 0x683 || _id == 0x693; \
})
#else
#define __cpu_is_pxa93x(id) (0)
#endif
#define cpu_is_pxa2xx() \
({ \
@ -309,7 +321,7 @@ extern unsigned long get_clock_tick_rate(void);
#define PCIBIOS_MIN_IO 0
#define PCIBIOS_MIN_MEM 0
#define pcibios_assign_all_busses() 1
#define ARCH_HAS_DMA_SET_COHERENT_MASK
#endif
#endif /* _ASM_ARCH_HARDWARE_H */

View File

@ -6,6 +6,8 @@
#ifndef __ASM_ARM_ARCH_IO_H
#define __ASM_ARM_ARCH_IO_H
#include <mach/hardware.h>
#define IO_SPACE_LIMIT 0xffffffff
/*

View File

@ -469,9 +469,13 @@ static struct i2c_board_info __initdata palm27x_pi2c_board_info[] = {
},
};
static struct i2c_pxa_platform_data palm27x_i2c_power_info = {
.use_pio = 1,
};
void __init palm27x_pmic_init(void)
{
i2c_register_board_info(1, ARRAY_AND_SIZE(palm27x_pi2c_board_info));
pxa27x_set_i2c_power_info(NULL);
pxa27x_set_i2c_power_info(&palm27x_i2c_power_info);
}
#endif

View File

@ -240,6 +240,7 @@ static void __init vpac270_onenand_init(void) {}
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
static struct pxamci_platform_data vpac270_mci_platform_data = {
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.gpio_power = -1,
.gpio_card_detect = GPIO53_VPAC270_SD_DETECT_N,
.gpio_card_ro = GPIO52_VPAC270_SD_READONLY,
.detect_delay_ms = 200,

View File

@ -273,6 +273,9 @@ extern void gpio_pullup(unsigned gpio, int value);
extern int gpio_get_value(unsigned gpio);
extern void gpio_set_value(unsigned gpio, int value);
#define gpio_get_value_cansleep gpio_get_value
#define gpio_set_value_cansleep gpio_set_value
/* wrappers to sleep-enable the previous two functions */
static inline unsigned gpio_to_irq(unsigned gpio)
{

View File

@ -227,7 +227,13 @@ static void ct_ca9x4_init(void)
int i;
#ifdef CONFIG_CACHE_L2X0
l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff);
void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);
/* set RAM latencies to 1 cycle for this core tile. */
writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
clkdev_add_table(lookups, ARRAY_SIZE(lookups));

View File

@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (ai_usermode & UM_SIGNAL)
force_sig(SIGBUS, current);
else
set_cr(cr_no_alignment);
else {
/*
* We're about to disable the alignment trap and return to
* user space. But if an interrupt occurs before actually
* reaching user space, then the IRQ vector entry code will
* notice that we were still in kernel space and therefore
* the alignment trap won't be re-enabled in that case as it
* is presumed to be always on from kernel space.
* Let's prevent that race by disabling interrupts here (they
* are disabled on the way back to user space anyway in
* entry-common.S) and disable the alignment trap only if
* there is no work pending for this thread.
*/
raw_local_irq_disable();
if (!(current_thread_info()->flags & _TIF_WORK_MASK))
set_cr(cr_no_alignment);
}
return 0;
}

View File

@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
.domain = DOMAIN_KERNEL,
},
[MT_MEMORY_NONCACHED] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@ -411,9 +418,12 @@ static void __init build_mem_type_table(void)
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
if (arch_is_coherent() && cpu_is_xsc3())
if (arch_is_coherent() && cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
}
/*
* ARMv6 and above have extended page tables.
*/
@ -438,7 +448,9 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
#endif
}
@ -475,6 +487,8 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
switch (cp->pmd) {
@ -498,6 +512,19 @@ static void __init build_mem_type_table(void)
}
}
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
if (!pfn_valid(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
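Editorial sketch (userspace, not from the patch) of what the new hook changes: mapping RAM through /dev/mem with O_SYNC now yields a write-combining mapping, while invalid (non-RAM) pfns stay strongly uncached. The physical address below is a placeholder.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* O_SYNC is what phys_mem_access_prot() tests via file->f_flags */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	void *p;

	if (fd < 0)
		return 1;
	/* 0x80000000 is a placeholder physical address of valid RAM */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, 0x80000000UL);
	if (p == MAP_FAILED)
		return 1;
	munmap(p, 4096);
	close(fd);
	return 0;
}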
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)

View File

@ -186,13 +186,14 @@ cpu_v7_name:
* It is assumed that:
* - cache type register is implemented
*/
__v7_setup:
__v7_ca9mp_setup:
#ifdef CONFIG_SMP
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
bl v7_flush_dcache_all
@ -201,11 +202,16 @@ __v7_setup:
mrc p15, 0, r0, c0, c0, 0 @ read main ID register
and r10, r0, #0xff000000 @ ARM?
teq r10, #0x41000000
bne 2f
bne 3f
and r5, r0, #0x00f00000 @ variant
and r6, r0, #0x0000000f @ revision
orr r0, r6, r5, lsr #20-4 @ combine variant and revision
orr r6, r6, r5, lsr #20-4 @ combine variant and revision
ubfx r0, r0, #4, #12 @ primary part number
/* Cortex-A8 Errata */
ldr r10, =0x00000c08 @ Cortex-A8 primary part number
teq r0, r10
bne 2f
#ifdef CONFIG_ARM_ERRATA_430973
teq r5, #0x00100000 @ only present in r1p*
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
@ -213,21 +219,42 @@ __v7_setup:
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_458693
teq r0, #0x20 @ only present in r2p0
teq r6, #0x20 @ only present in r2p0
mrceq p15, 0, r10, c1, c0, 1 @ read aux control register
orreq r10, r10, #(1 << 5) @ set L1NEON to 1
orreq r10, r10, #(1 << 9) @ set PLDNOP to 1
mcreq p15, 0, r10, c1, c0, 1 @ write aux control register
#endif
#ifdef CONFIG_ARM_ERRATA_460075
teq r0, #0x20 @ only present in r2p0
teq r6, #0x20 @ only present in r2p0
mrceq p15, 1, r10, c9, c0, 2 @ read L2 cache aux ctrl register
tsteq r10, #1 << 22
orreq r10, r10, #(1 << 22) @ set the Write Allocate disable bit
mcreq p15, 1, r10, c9, c0, 2 @ write the L2 cache aux ctrl register
#endif
b 3f
2: mov r10, #0
/* Cortex-A9 Errata */
2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number
teq r0, r10
bne 3f
#ifdef CONFIG_ARM_ERRATA_742230
cmp r6, #0x22 @ only present up to r2p2
mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
orrle r10, r10, #1 << 4 @ set bit #4
mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_742231
teq r6, #0x20 @ present in r2p0
teqne r6, #0x21 @ present in r2p1
teqne r6, #0x22 @ present in r2p2
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 12 @ set bit #12
orreq r10, r10, #1 << 22 @ set bit #22
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
3: mov r10, #0
#ifdef HARVARD_CACHE
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
@ -323,6 +350,29 @@ cpu_elf_name:
.section ".proc.info.init", #alloc, #execinstr
.type __v7_ca9mp_proc_info, #object
__v7_ca9mp_proc_info:
.long 0x410fc090 @ Required ID value
.long 0xff0ffff0 @ Mask for ID
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __v7_ca9mp_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
.long cpu_v7_name
.long v7_processor_functions
.long v7wbi_tlb_fns
.long v6_user_fns
.long v7_cache_fns
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
/*
* Match any ARMv7 processor core.
*/

View File

@ -1,5 +1,5 @@
/*
* linux/arch/arm/mach-nomadik/timer.c
* linux/arch/arm/plat-nomadik/timer.c
*
* Copyright (C) 2008 STMicroelectronics
* Copyright (C) 2010 Alessandro Rubini
@ -75,7 +75,7 @@ static void nmdk_clkevt_mode(enum clock_event_mode mode,
cr = readl(mtu_base + MTU_CR(1));
writel(0, mtu_base + MTU_LR(1));
writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
writel(0x2, mtu_base + MTU_IMSC);
writel(1 << 1, mtu_base + MTU_IMSC);
break;
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
@ -131,25 +131,23 @@ void __init nmdk_timer_init(void)
{
unsigned long rate;
struct clk *clk0;
struct clk *clk1;
u32 cr;
u32 cr = MTU_CRn_32BITS;
clk0 = clk_get_sys("mtu0", NULL);
BUG_ON(IS_ERR(clk0));
clk1 = clk_get_sys("mtu1", NULL);
BUG_ON(IS_ERR(clk1));
clk_enable(clk0);
clk_enable(clk1);
/*
* Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
* use a divide-by-16 counter if it's more than 16MHz
* Tick rate is 2.4 MHz for Nomadik and 2.4 MHz, 100 MHz or 133 MHz
* for ux500.
* Use a divide-by-16 counter if the tick rate is more than 32 MHz.
* At 32 MHz, the timer (with 32 bit counter) can be programmed
* to wake up at most ~127 s ahead in time. Dividing a 2.4 MHz timer
* by 16 gives too low a timer resolution.
*/
cr = MTU_CRn_32BITS;;
rate = clk_get_rate(clk0);
if (rate > 16 << 20) {
if (rate > 32000000) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
@ -170,15 +168,8 @@ void __init nmdk_timer_init(void)
pr_err("timer: failed to initialize clock source %s\n",
nmdk_clksrc.name);
/* Timer 1 is used for events, fix according to rate */
cr = MTU_CRn_32BITS;
rate = clk_get_rate(clk1);
if (rate > 16 << 20) {
rate /= 16;
cr |= MTU_CRn_PRESCALE_16;
} else {
cr |= MTU_CRn_PRESCALE_1;
}
/* Timer 1 is used for events */
clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */
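
A quick editorial sanity check of the figures in the comment above, under the assumption that a free-running 32-bit counter simply wraps after 0xffffffff / rate seconds. The comment's 127 s at 32 MHz is the same order as the ~134 s this gives; the gap presumably comes from clockevent bookkeeping, which this sketch ignores.

#include <stdio.h>

int main(void)
{
	unsigned long rates[] = { 2400000UL, 32000000UL, 133000000UL };
	int i;

	for (i = 0; i < 3; i++) {
		/* mirror the driver's prescaler decision above */
		unsigned long div = rates[i] > 32000000UL ? 16 : 1;
		printf("%9lu Hz /%-2lu -> wraps every ~%lu s\n",
		       rates[i], div, 0xffffffffUL / (rates[i] / div));
	}
	return 0;
}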

View File

@ -220,20 +220,7 @@ void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
if (cpu_is_omap24xx()) {
omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
base = OMAP2_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
}
if (cpu_is_omap34xx()) {
omap_sram_io_desc[0].virtual = OMAP3_SRAM_VA;
base = OMAP3_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
/*
* SRAM must be marked as non-cached on OMAP3 since the
* CORE DPLL M2 divider change code (in SRAM) runs with the
@ -244,13 +231,11 @@ void __init omap_map_sram(void)
omap_sram_io_desc[0].type = MT_MEMORY_NONCACHED;
}
if (cpu_is_omap44xx()) {
omap_sram_io_desc[0].virtual = OMAP4_SRAM_VA;
base = OMAP4_SRAM_PA;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
}
omap_sram_io_desc[0].length = 1024 * 1024; /* Use section desc */
omap_sram_io_desc[0].virtual = omap_sram_base;
base = omap_sram_start;
base = ROUND_DOWN(base, PAGE_SIZE);
omap_sram_io_desc[0].pfn = __phys_to_pfn(base);
omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",

View File

@ -157,7 +157,6 @@ typedef struct sigaltstack {
#undef __HAVE_ARCH_SIG_BITOPS
struct pt_regs;
extern int do_signal(struct pt_regs *regs, sigset_t *oldset);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)

View File

@ -351,6 +351,7 @@
#define __ARCH_WANT_SYS_OLD_GETRLIMIT /*will be unused*/
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __IGNORE_lchown
#define __IGNORE_setuid

View File

@ -235,10 +235,9 @@ work_resched:
work_notifysig: ; deal with pending signals and
; notify-resume requests
mv r0, sp ; arg1 : struct pt_regs *regs
ldi r1, #0 ; arg2 : sigset_t *oldset
mv r2, r9 ; arg3 : __u32 thread_info_flags
mv r1, r9 ; arg2 : __u32 thread_info_flags
bl do_notify_resume
bra restore_all
bra resume_userspace
; perform syscall exit tracing
ALIGN

View File

@ -592,16 +592,17 @@ void user_enable_single_step(struct task_struct *child)
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
break;
return -EIO;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
break;
return -EIO;
if (embed_debug_trap(child, next_pc))
break;
return -EIO;
invalidate_cache();
return 0;
}
void user_disable_single_step(struct task_struct *child)

View File

@ -28,37 +28,6 @@
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
int do_signal(struct pt_regs *, sigset_t *);
asmlinkage int
sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize,
unsigned long r2, unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, struct pt_regs *regs)
{
sigset_t newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
unsigned long r2, unsigned long r3, unsigned long r4,
@ -218,7 +187,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
return (void __user *)((sp - frame_size) & -8ul);
}
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
@ -275,22 +244,34 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
current->comm, current->pid, frame, regs->pc);
#endif
return;
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
static int prev_insn(struct pt_regs *regs)
{
u16 inst;
if (get_user(inst, (u16 __user *)(regs->bpc - 2)))
return -EFAULT;
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
regs->syscall_nr = -1;
return 0;
}
/*
* OK, we're invoking a handler
*/
static void
static int
handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
unsigned short inst;
/* Are we from a system call? */
if (regs->syscall_nr >= 0) {
/* If so, check system call restarting.. */
@ -308,16 +289,14 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
/* fallthrough */
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
if (prev_insn(regs) < 0)
return -EFAULT;
}
}
/* Set up the stack frame */
setup_rt_frame(sig, ka, info, oldset, regs);
if (setup_rt_frame(sig, ka, info, oldset, regs))
return -EFAULT;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@ -325,6 +304,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 0;
}
/*
@ -332,12 +312,12 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
int do_signal(struct pt_regs *regs, sigset_t *oldset)
static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
struct k_sigaction ka;
unsigned short inst;
sigset_t *oldset;
/*
* We want the common case to go fast, which
@ -346,12 +326,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
* if so.
*/
if (!user_mode(regs))
return 1;
return;
if (try_to_freeze())
goto no_signal;
if (!oldset)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@ -363,8 +345,10 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
*/
/* Whee! Actually deliver the signal. */
handle_signal(signr, &ka, &info, oldset, regs);
return 1;
if (handle_signal(signr, &ka, &info, oldset, regs) == 0)
clear_thread_flag(TIF_RESTORE_SIGMASK);
return;
}
no_signal:
@ -375,31 +359,24 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
regs->r0 == -ERESTARTSYS ||
regs->r0 == -ERESTARTNOINTR) {
regs->r0 = regs->orig_r0;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
}
if (regs->r0 == -ERESTART_RESTARTBLOCK){
prev_insn(regs);
} else if (regs->r0 == -ERESTART_RESTARTBLOCK){
regs->r0 = regs->orig_r0;
regs->r7 = __NR_restart_syscall;
inst = *(unsigned short *)(regs->bpc - 2);
if ((inst & 0xfff0) == 0x10f0) /* trap ? */
regs->bpc -= 2;
else
regs->bpc -= 4;
prev_insn(regs);
}
}
return 0;
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
/*
* notification of userspace execution resumption
* - triggered by current->work.notify_resume
*/
void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
__u32 thread_info_flags)
void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
{
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP)
@ -407,7 +384,7 @@ void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset);
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);

View File

@ -8,7 +8,6 @@ mainmenu "Linux Kernel Configuration"
config MN10300
def_bool y
select HAVE_OPROFILE
select HAVE_ARCH_TRACEHOOK
config AM33
def_bool y

View File

@ -101,7 +101,7 @@ config GDBSTUB_DEBUG_BREAKPOINT
choice
prompt "GDB stub port"
default GDBSTUB_TTYSM0
default GDBSTUB_ON_TTYSM0
depends on GDBSTUB
help
Select the serial port used for GDB-stub.

View File

@ -229,9 +229,9 @@ int ffs(int x)
#include <asm-generic/bitops/hweight.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr) ^ 0x18, (addr))
test_and_set_bit((nr), (addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr) ^ 0x18, (addr))
test_and_clear_bit((nr), (addr))
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/minix-le.h>

View File

@ -78,7 +78,7 @@ typedef unsigned long sigset_t;
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:

View File

@ -65,10 +65,10 @@ asmlinkage long sys_sigaction(int sig,
old_sigset_t mask;
if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
@ -77,10 +77,10 @@ asmlinkage long sys_sigaction(int sig,
if (!ret && oact) {
if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
@ -102,6 +102,9 @@ static int restore_sigcontext(struct pt_regs *regs,
{
unsigned int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
if (is_using_fpu(current))
fpu_kill_state(current);
@ -330,8 +333,6 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
regs->d0 = sig;
regs->d1 = (unsigned long) &frame->sc;
set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@ -345,7 +346,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
return 0;
give_sigsegv:
force_sig(SIGSEGV, current);
force_sigsegv(sig, current);
return -EFAULT;
}
@ -413,8 +414,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->d0 = sig;
regs->d1 = (long) &frame->info;
set_fs(USER_DS);
/* the tracer may want to single-step inside the handler */
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);
@ -428,10 +427,16 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
return 0;
give_sigsegv:
force_sig(SIGSEGV, current);
force_sigsegv(sig, current);
return -EFAULT;
}
static inline void stepback(struct pt_regs *regs)
{
regs->pc -= 2;
regs->orig_d0 = -1;
}
/*
* handle the actual delivery of a signal to userspace
*/
@ -459,7 +464,7 @@ static int handle_signal(int sig,
/* fallthrough */
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
regs->pc -= 2;
stepback(regs);
}
}
@ -527,12 +532,12 @@ static void do_signal(struct pt_regs *regs)
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
regs->pc -= 2;
stepback(regs);
break;
case -ERESTART_RESTARTBLOCK:
regs->d0 = __NR_restart_syscall;
regs->pc -= 2;
stepback(regs);
break;
}
}

View File

@ -138,6 +138,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
regs->trap = 0;
return 0; /* no signals delivered */
}
@ -164,6 +165,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
ret = handle_rt_signal64(signr, &ka, &info, oldset, regs);
}
regs->trap = 0;
if (ret) {
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked,

View File

@ -511,6 +511,7 @@ static long restore_user_regs(struct pt_regs *regs,
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
err = restore_general_regs(regs, sr);
regs->trap = 0;
err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
@ -884,7 +885,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;
badframe:
@ -1228,7 +1228,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
regs->nip = (unsigned long) ka->sa.sa_handler;
/* enter the signal handler in big-endian mode */
regs->msr &= ~MSR_LE;
regs->trap = 0;
return 1;

View File

@ -178,7 +178,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
/* skip SOFTE */
err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
regs->trap = 0;
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

View File

@ -1038,6 +1038,7 @@ static int __hw_perf_event_init(struct perf_event *event)
if (atomic_read(&nmi_active) < 0)
return -ENODEV;
pmap = NULL;
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= sparc_pmu->max_events)
return -EINVAL;
@ -1046,9 +1047,18 @@ static int __hw_perf_event_init(struct perf_event *event)
pmap = sparc_map_cache_event(attr->config);
if (IS_ERR(pmap))
return PTR_ERR(pmap);
} else
} else if (attr->type != PERF_TYPE_RAW)
return -EOPNOTSUPP;
if (pmap) {
hwc->event_base = perf_event_encode(pmap);
} else {
/* User gives us "(encoding << 16) | pic_mask" for
* PERF_TYPE_RAW events.
*/
hwc->event_base = attr->config;
}
/* We save the enable bits in the config_base. */
hwc->config_base = sparc_pmu->irq_bit;
if (!attr->exclude_user)
@ -1058,8 +1068,6 @@ static int __hw_perf_event_init(struct perf_event *event)
if (!attr->exclude_hv)
hwc->config_base |= sparc_pmu->hv_bit;
hwc->event_base = perf_event_encode(pmap);
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
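
For illustration (user-side, not part of the patch): the "(encoding << 16) | pic_mask" packing from the comment above translates to a perf_event_attr set up like this, where 0x12 and 0x0f are made-up placeholder values.

#include <linux/perf_event.h>
#include <string.h>

/* Hypothetical raw-event setup matching the packing described above. */
static void setup_sparc_raw_event(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_RAW;
	attr->size = sizeof(*attr);
	attr->config = (0x12ULL << 16) | 0x0f;	/* encoding | pic_mask */
}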

View File

@ -453,8 +453,66 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
return err;
}
static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
/* The I-cache flush instruction only works in the primary ASI, which
* right now is the nucleus, aka. kernel space.
*
* Therefore we have to kick the instructions out using the kernel
* side linear mapping of the physical address backing the user
* instructions.
*/
static void flush_signal_insns(unsigned long address)
{
unsigned long pstate, paddr;
pte_t *ptep, pte;
pgd_t *pgdp;
pud_t *pudp;
pmd_t *pmdp;
/* Commit all stores of the instructions we are about to flush. */
wmb();
/* Disable cross-call reception. In this way even a very wide
* munmap() on another cpu can't tear down the page table
* hierarchy from underneath us, since that can't complete
* until the IPI tlb flush returns.
*/
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
__asm__ __volatile__("wrpr %0, %1, %%pstate"
: : "r" (pstate), "i" (PSTATE_IE));
pgdp = pgd_offset(current->mm, address);
if (pgd_none(*pgdp))
goto out_irqs_on;
pudp = pud_offset(pgdp, address);
if (pud_none(*pudp))
goto out_irqs_on;
pmdp = pmd_offset(pudp, address);
if (pmd_none(*pmdp))
goto out_irqs_on;
ptep = pte_offset_map(pmdp, address);
pte = *ptep;
if (!pte_present(pte))
goto out_unmap;
paddr = (unsigned long) page_address(pte_page(pte));
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (paddr),
"r" (address & (PAGE_SIZE - 1))
: "memory");
out_unmap:
pte_unmap(ptep);
out_irqs_on:
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
@ -547,13 +605,7 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
/* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pud_t *pudp = pud_offset(pgdp, address);
pmd_t *pmdp = pmd_offset(pudp, address);
pte_t *ptep;
pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@ -562,34 +614,22 @@ static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
pte = *ptep;
if (pte_present(pte)) {
unsigned long page = (unsigned long)
page_address(pte_page(pte));
wmb();
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (page),
"r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
flush_signal_insns(address);
}
return;
return 0;
sigill:
do_exit(SIGILL);
return -EINVAL;
sigsegv:
force_sigsegv(signo, current);
return -EFAULT;
}
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long signr, sigset_t *oldset,
siginfo_t *info)
static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long signr, sigset_t *oldset,
siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
@ -687,12 +727,7 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
/* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pud_t *pudp = pud_offset(pgdp, address);
pmd_t *pmdp = pmd_offset(pudp, address);
pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@ -704,38 +739,32 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
if (pte_present(*ptep)) {
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
wmb();
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (page),
"r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
flush_signal_insns(address);
}
return;
return 0;
sigill:
do_exit(SIGILL);
return -EINVAL;
sigsegv:
force_sigsegv(signr, current);
return -EFAULT;
}
static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame32(ka, regs, signr, oldset, info);
err = setup_rt_frame32(ka, regs, signr, oldset, info);
else
setup_frame32(ka, regs, signr, oldset);
err = setup_frame32(ka, regs, signr, oldset);
if (err)
return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@ -743,6 +772,10 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
tracehook_signal_handler(signr, info, ka, regs, 0);
return 0;
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@ -789,16 +822,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
handle_signal32(signr, &ka, &info, oldset, regs);
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) {
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
return;
}
if (restart_syscall &&
@ -809,12 +840,14 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs,
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask

View File

@ -315,8 +315,8 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
return err;
}
static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
{
struct signal_frame __user *sf;
int sigframe_size, err;
@ -384,16 +384,19 @@ static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
return;
return 0;
sigill_and_return:
do_exit(SIGILL);
return -EINVAL;
sigsegv:
force_sigsegv(signo, current);
return -EFAULT;
}
static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size;
@ -466,22 +469,30 @@ static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
/* Flush instruction space. */
flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
}
return;
return 0;
sigill:
do_exit(SIGILL);
return -EINVAL;
sigsegv:
force_sigsegv(signo, current);
return -EFAULT;
}
static inline void
static inline int
handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
int err;
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame(ka, regs, signr, oldset, info);
err = setup_rt_frame(ka, regs, signr, oldset, info);
else
setup_frame(ka, regs, signr, oldset);
err = setup_frame(ka, regs, signr, oldset);
if (err)
return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@ -489,6 +500,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
tracehook_signal_handler(signr, info, ka, regs, 0);
return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@ -546,17 +561,15 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
/* a signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
tracehook_signal_handler(signr, &info, &ka, regs, 0);
if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
/* a signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
return;
}
if (restart_syscall &&
@ -567,12 +580,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->u_regs[UREG_I0] = orig_i0;
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
}
/* if there's no signal to deliver, we just put the saved sigmask

View File

@ -409,7 +409,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
return (void __user *) sp;
}
static inline void
static inline int
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
@ -483,26 +483,37 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
}
/* 4. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
return;
return 0;
sigill:
do_exit(SIGILL);
return -EINVAL;
sigsegv:
force_sigsegv(signo, current);
return -EFAULT;
}
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
int err;
err = setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
if (err)
return err;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked,signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
tracehook_signal_handler(signr, info, ka, regs, 0);
return 0;
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@ -571,16 +582,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
return;
}
if (restart_syscall &&
@ -591,12 +600,14 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
}
/* If there's no signal to deliver, we just put the saved sigmask

View File

@ -1506,13 +1506,6 @@ handle_ill:
}
STD_ENDPROC(handle_ill)
.pushsection .rodata, "a"
.align 8
bpt_code:
bpt
ENDPROC(bpt_code)
.popsection
/* Various stub interrupt handlers and syscall handlers */
STD_ENTRY_LOCAL(_kernel_double_fault)

View File

@ -62,7 +62,7 @@ static long execve1(const char *file,
return error;
}
long um_execve(const char *file, char __user *__user *argv, char __user *__user *env)
long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
{
long err;
@ -72,8 +72,8 @@ long um_execve(const char *file, char __user *__user *argv, char __user *__user
return err;
}
long sys_execve(const char __user *file, char __user *__user *argv,
char __user *__user *env)
long sys_execve(const char __user *file, const char __user *const __user *argv,
const char __user *const __user *env)
{
long error;
char *filename;

View File

@ -1 +1 @@
extern long um_execve(const char *file, char __user *__user *argv, char __user *__user *env);
extern long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env);

View File

@ -60,8 +60,8 @@ int kernel_execve(const char *filename,
fs = get_fs();
set_fs(KERNEL_DS);
ret = um_execve(filename, (char __user *__user *)argv,
(char __user *__user *) envp);
ret = um_execve(filename, (const char __user *const __user *)argv,
(const char __user *const __user *) envp);
set_fs(fs);
return ret;

View File

@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
if (arg[pos] == ',')
pos++;
if (!strncmp(arg, "ttyS", 4)) {
/*
* make sure we have
* "serial,0x3f8,115200"
* "serial,ttyS0,115200"
* "ttyS0,115200"
*/
if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
port = simple_strtoull(arg + pos, &e, 16);
if (port == 0 || arg + pos == e)
port = DEFAULT_SERIAL_PORT;
else
pos = e - arg;
} else if (!strncmp(arg + pos, "ttyS", 4)) {
static const int bases[] = { 0x3f8, 0x2f8 };
int idx = 0;
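
Worked example (editorial): for arg = "serial,0x3f8,115200", pos is 7 once "serial" and the comma are consumed, the "0x" test matches, and the hex parse leaves e on the comma before the baud rate. The userspace sketch below uses strtoul as a stand-in for the kernel's simple_strtoull.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *arg = "serial,0x3f8,115200";
	int pos = 7;			/* index just past "serial," */
	char *e;
	unsigned long port = strtoul(arg + pos, &e, 16);

	pos = e - arg;
	/* prints: port=0x3f8 rest=",115200" */
	printf("port=0x%lx rest=%s\n", port, arg + pos);
	return 0;
}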

View File

@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }
#endif /* !CONFIG_AMD_IOMMU_STATS */
static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
(pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}
#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */

View File

@ -368,6 +368,9 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
/* flags read from acpi table */
u8 acpi_flags;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@ -411,6 +414,15 @@ struct amd_iommu {
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
/*
* This array is required to work around a potential BIOS bug.
* The BIOS may fail to restore parts of the PCI configuration
* space when the system resumes from S3. The result is that the
* IOMMU does not execute commands anymore, which leads to system
* failure.
*/
u32 cache_cfg[4];
};
/*

View File

@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
(addr[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
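
Why the one-line change above matters (editorial reading): the old cast to plain `unsigned long *` discarded the volatile qualifier, allowing the compiler to cache the load across calls. A minimal sketch of the qualifier-preserving form, with the word size spelled out since this is outside kernel headers:

/* Hedged illustration, not kernel code: keeping the volatile qualifier
 * on the dereference forces a fresh load of the word each time. */
static inline int test_bit_keep_volatile(unsigned int nr,
					 const volatile unsigned long *addr)
{
	unsigned int bits = 8 * sizeof(unsigned long);

	return (addr[nr / bits] >> (nr % bits)) & 1;
}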

View File

@ -168,6 +168,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */

View File

@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
#include <linux/list.h>
/* Available HW breakpoint length encodings */
#define X86_BREAKPOINT_LEN_X 0x00
#define X86_BREAKPOINT_LEN_X 0x40
#define X86_BREAKPOINT_LEN_1 0x40
#define X86_BREAKPOINT_LEN_2 0x44
#define X86_BREAKPOINT_LEN_4 0x4c

View File

@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_pvclock.o = -pg
CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif

View File

@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
size_t size,
int dir)
{
dma_addr_t flush_addr;
dma_addr_t i, start;
unsigned int pages;
@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
(dma_addr + size > dma_dom->aperture_size))
return;
flush_addr = dma_addr;
pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr &= PAGE_MASK;
start = dma_addr;
@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
dma_ops_free_addresses(dma_dom, dma_addr, pages);
if (amd_iommu_unmap_flush || dma_dom->need_flush) {
iommu_flush_pages(&dma_dom->domain, dma_addr, size);
iommu_flush_pages(&dma_dom->domain, flush_addr, size);
dma_dom->need_flush = false;
}
}

View File

@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
iommu->last_device = calc_devid(MMIO_GET_BUS(range),
MMIO_GET_LD(range));
iommu->evt_msi_num = MMIO_MSI_NUM(misc);
if (is_rd890_iommu(iommu->dev)) {
pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
}
}
/*
@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
struct ivhd_entry *e;
/*
* First set the recommended feature enable bits from ACPI
* into the IOMMU control registers
* First save the recommended feature enable bits from ACPI
*/
h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
h->flags & IVHD_FLAG_ISOC_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
iommu_feature_disable(iommu, CONTROL_ISOC_EN);
/*
* make IOMMU memory accesses cache coherent
*/
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
iommu->acpi_flags = h->flags;
/*
* Done. Now parse the device entries
@ -1116,6 +1103,40 @@ static void init_device_table(void)
}
}
static void iommu_init_flags(struct amd_iommu *iommu)
{
iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
iommu_feature_disable(iommu, CONTROL_ISOC_EN);
/*
* make IOMMU memory accesses cache coherent
*/
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}
static void iommu_apply_quirks(struct amd_iommu *iommu)
{
if (is_rd890_iommu(iommu->dev)) {
pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
}
}
/*
* This function finally enables all IOMMUs found in the system after
* they have been initialized
@ -1126,6 +1147,8 @@ static void enable_iommus(void)
for_each_iommu(iommu) {
iommu_disable(iommu);
iommu_apply_quirks(iommu);
iommu_init_flags(iommu);
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
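
The RD890 hunks snapshot four config-space dwords (0xf0-0xfc) at probe time and write them back before re-enabling the IOMMU, covering a BIOS that loses them across S3. The same save/restore pattern against a mock config space (cfg_read/cfg_write are stand-ins, not the PCI API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fake_cfg_space[64];	/* stand-in for PCI config space */

static uint32_t cfg_read(unsigned off)		{ return fake_cfg_space[off / 4]; }
static void cfg_write(unsigned off, uint32_t v){ fake_cfg_space[off / 4] = v; }

struct iommu { uint32_t cache_cfg[4]; };

static void save_quirk(struct iommu *iommu)
{
	for (int i = 0; i < 4; i++)		/* offsets 0xf0..0xfc */
		iommu->cache_cfg[i] = cfg_read(0xf0 + 4 * i);
}

static void apply_quirk(const struct iommu *iommu)
{
	for (int i = 0; i < 4; i++)
		cfg_write(0xf0 + 4 * i, iommu->cache_cfg[i]);
}

int main(void)
{
	struct iommu iommu;

	cfg_write(0xf0, 0xdeadbeef);
	save_quirk(&iommu);
	memset(fake_cfg_space, 0, sizeof(fake_cfg_space)); /* "BIOS lost it" */
	apply_quirk(&iommu);
	printf("0x%08x\n", cfg_read(0xf0));	/* 0xdeadbeef again */
	return 0;
}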

View File

@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,
old_cfg = old_desc->chip_data;
memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
cfg->vector = old_cfg->vector;
cfg->move_in_progress = old_cfg->move_in_progress;
cpumask_copy(cfg->domain, old_cfg->domain);
cpumask_copy(cfg->old_domain, old_cfg->old_domain);
init_copy_irq_2_pin(old_cfg, cfg, node);
}
static void free_irq_cfg(struct irq_cfg *old_cfg)
static void free_irq_cfg(struct irq_cfg *cfg)
{
kfree(old_cfg);
free_cpumask_var(cfg->domain);
free_cpumask_var(cfg->old_domain);
kfree(cfg);
}
void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)

View File

@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
}
}
static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 tfms, xlvl;
u32 ebx;

View File

@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void get_cpu_cap(struct cpuinfo_x86 *c);
#endif

View File

@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
c->cpuid_level = cpuid_eax(0);
get_cpu_cap(c);
}
}

View File

@ -102,6 +102,7 @@ struct cpu_hw_events {
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events;
@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
x86_perf_event_set_period(event);
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
__set_bit(idx, cpuc->running);
x86_pmu.enable(event);
perf_event_update_userpage(event);
@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter, some CPUs
* might still deliver spurious interrupts that
* are still in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
continue;
}
event = cpuc->events[idx];
hwc = &event->hw;
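
cpu_hw_events grows a running bitmask: a counter that was just stopped may still have an NMI in flight, and the sticky bit lets the handler claim it exactly once instead of counting it as unknown. A plain-C sketch of that claim logic (bool arrays stand in for the kernel's bitops, and the disable step is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static bool active[NUM_COUNTERS], running[NUM_COUNTERS];

/* Returns how many counters the interrupt handler could claim. */
static int handle_irq(void)
{
	int handled = 0;

	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!active[idx]) {
			/* Counter was stopped, but an interrupt may still
			 * be in flight: claim it once via the sticky bit. */
			if (running[idx]) {
				running[idx] = false;
				handled++;
			}
			continue;
		}
		handled++;	/* normal overflow handling would go here */
	}
	return handled;
}

int main(void)
{
	active[1] = running[1] = true;	/* start: both bits set */
	active[1] = false;		/* stop: active cleared, running sticks */
	printf("%d\n", handle_irq());	/* 1: late interrupt claimed */
	printf("%d\n", handle_irq());	/* 0: claimed only once */
	return 0;
}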

View File

@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
{ X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },

View File

@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
{
unsigned int irq;
irq = create_irq();
irq = create_irq_nr(0, -1);
if (!irq)
return -EINVAL;

View File

@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
if (x86_len != X86_BREAKPOINT_LEN_X)
return -EINVAL;
*gen_type = HW_BREAKPOINT_X;
*gen_len = sizeof(long);
return 0;
case X86_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
case X86_BREAKPOINT_RW:
*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
break;
default:
return -EINVAL;
}
/* Len */
switch (x86_len) {
case X86_BREAKPOINT_LEN_X:
*gen_len = sizeof(long);
break;
case X86_BREAKPOINT_LEN_1:
*gen_len = HW_BREAKPOINT_LEN_1;
break;
@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
return -EINVAL;
}
/* Type */
switch (x86_type) {
case X86_BREAKPOINT_EXECUTE:
*gen_type = HW_BREAKPOINT_X;
break;
case X86_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
case X86_BREAKPOINT_RW:
*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
break;
default:
return -EINVAL;
}
return 0;
}
@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
ret = -EINVAL;
switch (info->len) {
case X86_BREAKPOINT_LEN_X:
align = sizeof(long) -1;
break;
case X86_BREAKPOINT_LEN_1:
align = 0;
break;

View File

@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
}
/*
* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
* then tell the Host to reload the entire thing. This operation is so rare
* that this naive implementation is reasonable.
* For a single GDT entry which changes, we simply change our copy and
* then tell the host about it.
*/
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
const void *desc, int type)
@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
}
/*
* OK, I lied. There are three "thread local storage" GDT entries which change
* There are three "thread local storage" GDT entries which change
* on every context switch (these three entries are how glibc implements
* __thread variables). So we have a hypercall specifically for this case.
* __thread variables). As an optimization, we have a hypercall
* specifically for this case.
*
* Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
* which took a range of entries?
*/
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{

View File

@ -671,7 +671,9 @@ static int __init ppro_init(char **cpu_type)
case 14:
*cpu_type = "i386/core";
break;
case 15: case 23:
case 0x0f:
case 0x16:
case 0x17:
*cpu_type = "i386/core_2";
break;
case 0x1a:

View File

@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
return PTR_ERR(bio);
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << REQ_WRITE);
bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
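
In this kernel REQ_WRITE is already a mask (1 << __REQ_WRITE), so shifting 1 by it set a neighbouring flag instead of the write bit. A minimal reproduction with abridged, assumed bit names:

#include <stdio.h>

enum { __REQ_WRITE, __REQ_FAILFAST_DEV };	/* bit numbers (abridged) */
#define REQ_WRITE	(1 << __REQ_WRITE)	/* the flag is already a mask */

int main(void)
{
	unsigned long rw = 0;

	rw |= (1 << REQ_WRITE);		/* buggy: shifts by the mask, sets bit 1 */
	printf("buggy: 0x%lx\n", rw);	/* 0x2, a different flag entirely */

	rw = 0;
	rw |= REQ_WRITE;		/* fixed: OR the mask in directly */
	printf("fixed: 0x%lx\n", rw);	/* 0x1 */
	return 0;
}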

View File

@ -361,6 +361,18 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (!rq_mergeable(req) || !rq_mergeable(next))
return 0;
/*
* Don't merge file system requests and discard requests
*/
if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
return 0;
/*
* Don't merge discard requests and secure discard requests
*/
if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
return 0;
/*
* not contiguous
*/

View File

@ -1019,10 +1019,20 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
*/
atomic_set(&cfqg->ref, 1);
/* Add group onto cgroup list */
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
/*
* Add group onto cgroup list. It might happen that bdi->dev is
* not initialized yet. Initialize this new group without major
* and minor info; this info will be filled in once a new thread
* comes in for IO. See the code above.
*/
if (bdi->dev) {
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
} else
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
0);
cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
/* Add group on cfqd list */

View File

@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
static struct scsi_host_template ahci_sht = {
AHCI_SHT("ahci"),
};
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
.hardreset = ahci_vt8251_hardreset,

View File

@ -298,7 +298,17 @@ struct ahci_host_priv {
extern int ahci_ignore_sss;
extern struct scsi_host_template ahci_sht;
extern struct device_attribute *ahci_shost_attrs[];
extern struct device_attribute *ahci_sdev_attrs[];
#define AHCI_SHT(drv_name) \
ATA_NCQ_SHT(drv_name), \
.can_queue = AHCI_MAX_CMDS - 1, \
.sg_tablesize = AHCI_MAX_SG, \
.dma_boundary = AHCI_DMA_BOUNDARY, \
.shost_attrs = ahci_shost_attrs, \
.sdev_attrs = ahci_sdev_attrs
extern struct ata_port_operations ahci_ops;
void ahci_save_initial_config(struct device *dev,

View File

@ -23,6 +23,10 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -145,7 +149,7 @@ static int __init ahci_probe(struct platform_device *pdev)
ahci_print_info(host, "platform");
rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
&ahci_sht);
&ahci_platform_sht);
if (rc)
goto err0;

View File

@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
ahci_read_em_buffer, ahci_store_em_buffer);
static struct device_attribute *ahci_shost_attrs[] = {
struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_link_power_management_policy,
&dev_attr_em_message_type,
&dev_attr_em_message,
@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = {
&dev_attr_em_buffer,
NULL
};
EXPORT_SYMBOL_GPL(ahci_shost_attrs);
static struct device_attribute *ahci_sdev_attrs[] = {
struct device_attribute *ahci_sdev_attrs[] = {
&dev_attr_sw_activity,
&dev_attr_unload_heads,
NULL
};
struct scsi_host_template ahci_sht = {
ATA_NCQ_SHT("ahci"),
.can_queue = AHCI_MAX_CMDS - 1,
.sg_tablesize = AHCI_MAX_SG,
.dma_boundary = AHCI_DMA_BOUNDARY,
.shost_attrs = ahci_shost_attrs,
.sdev_attrs = ahci_sdev_attrs,
};
EXPORT_SYMBOL_GPL(ahci_sht);
EXPORT_SYMBOL_GPL(ahci_sdev_attrs);
struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,

View File

@ -4792,7 +4792,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean4:
kfree(h->cmd_pool_bits);
/* Free up sg elements */
for (k = 0; k < h->nr_cmds; k++)
for (k-- ; k >= 0; k--)
kfree(h->scatter_list[k]);
kfree(h->scatter_list);
cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
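
The cciss error path previously walked all h->nr_cmds slots even though only the first k were ever allocated; counting k back down frees exactly what exists. The same unwind idiom in a standalone sketch:

#include <stdlib.h>

/* Allocate n buffers; on failure free only those already allocated. */
static char **alloc_all(int n, size_t sz)
{
	char **v = calloc(n, sizeof(*v));
	int k;

	if (!v)
		return NULL;
	for (k = 0; k < n; k++) {
		v[k] = malloc(sz);
		if (!v[k])
			goto clean;
	}
	return v;
clean:
	for (k--; k >= 0; k--)	/* the fixed pattern: unwind, don't overrun */
		free(v[k]);
	free(v);
	return NULL;
}

int main(void)
{
	char **v = alloc_all(8, 64);

	if (v) {
		for (int k = 7; k >= 0; k--)
			free(v[k]);
		free(v);
	}
	return 0;
}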

View File

@ -2369,7 +2369,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_shrink_pktlist(pd);
}
static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
if (dev_minor >= MAX_WRITERS)
return NULL;
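
pkt_find_dev_from_minor() now takes an unsigned minor, so a negative value can no longer slip under the >= MAX_WRITERS bound and index ahead of the array. A two-function demonstration of the signedness trap:

#include <stdio.h>

#define MAX_WRITERS 8

static int in_range_signed(int minor)
{
	return minor < MAX_WRITERS;	/* -1 < 8: a negative minor sneaks by */
}

static int in_range_unsigned(unsigned int minor)
{
	return minor < MAX_WRITERS;	/* (unsigned)-1 is huge and rejected */
}

int main(void)
{
	printf("signed -1:   %s\n",
	       in_range_signed(-1) ? "accepted" : "rejected");
	printf("unsigned -1: %s\n",
	       in_range_unsigned(-1) ? "accepted" : "rejected");
	return 0;
}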

View File

@ -806,6 +806,8 @@ static const struct intel_driver_description {
"G45/G43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
"B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
"B43", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,

View File

@ -186,6 +186,8 @@
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40
#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42
#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90
#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92
#define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40
#define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42
#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00

View File

@ -305,6 +305,9 @@ static int num_force_kipmid;
#ifdef CONFIG_PCI
static int pci_registered;
#endif
#ifdef CONFIG_ACPI
static int pnp_registered;
#endif
#ifdef CONFIG_PPC_OF
static int of_registered;
#endif
@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
{
struct acpi_device *acpi_dev;
struct smi_info *info;
struct resource *res;
struct resource *res, *res_second;
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
@ -2182,13 +2185,13 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev,
info->io.addr_data = res->start;
info->io.regspacing = DEFAULT_REGSPACING;
res = pnp_get_resource(dev,
res_second = pnp_get_resource(dev,
(info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
IORESOURCE_IO : IORESOURCE_MEM,
1);
if (res) {
if (res->start > info->io.addr_data)
info->io.regspacing = res->start - info->io.addr_data;
if (res_second) {
if (res_second->start > info->io.addr_data)
info->io.regspacing = res_second->start - info->io.addr_data;
}
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_ACPI
pnp_register_driver(&ipmi_pnp_driver);
pnp_registered = 1;
#endif
#ifdef CONFIG_DMI
@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void)
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_ACPI
pnp_unregister_driver(&ipmi_pnp_driver);
if (pnp_registered)
pnp_unregister_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PPC_OF

View File

@ -788,10 +788,11 @@ static const struct file_operations zero_fops = {
/*
* capabilities for /dev/zero
* - permits private mappings, "copies" are taken of the source of zeros
* - no writeback happens
*/
static struct backing_dev_info zero_bdi = {
.name = "char/mem",
.capabilities = BDI_CAP_MAP_COPY,
.capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct file_operations full_fops = {

View File

@ -596,6 +596,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
ssize_t ret;
bool nonblock;
/* Userspace could be out to fool us */
if (!count)
return 0;
port = filp->private_data;
nonblock = filp->f_flags & O_NONBLOCK;
@ -642,7 +646,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &port->waitqueue, wait);
ret = 0;
if (port->inbuf)
if (!will_read_block(port))
ret |= POLLIN | POLLRDNORM;
if (!will_write_block(port))
ret |= POLLOUT;

View File

@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = (1 << (1 + (chan->idx * 16)));
u32 val = ~(1 << (chan->idx * 16));
dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
__raw_writel(val, XOR_INTR_CAUSE(chan));
}
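
The fix writes the inverted single-bit mask to the XOR engine's cause register instead of a misplaced shifted bit. A simulation of the acknowledge write, assuming write-zero-to-clear semantics as implied by the inverted mask:

#include <stdint.h>
#include <stdio.h>

/* Simulated cause register: bits written as zero are cleared, bits
 * written as one are preserved (an assumption matching the hunk). */
static uint32_t cause = 0x00030003;

static void clear_chan_eoc(int idx)
{
	uint32_t val = ~(1u << (idx * 16));	/* fixed: invert the one bit */

	cause &= val;
}

int main(void)
{
	clear_chan_eoc(1);
	printf("0x%08x\n", cause);	/* 0x00020003: only bit 16 cleared */
	return 0;
}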

View File

@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
int status;
if (mci->op_state != OP_RUNNING_POLL)
return;
status = cancel_delayed_work(&mci->work);
if (status == 0) {
debugf0("%s() not canceled, flush the queue\n",

View File

@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc);
* user_data: A pointer the data that is copied to the buffer.
* size: The Number of bytes to copy.
*/
extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size)
int drm_buffer_copy_from_user(struct drm_buffer *buf,
void __user *user_data, int size)
{
int nr_pages = size / PAGE_SIZE + 1;
int idx;
@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf,
{
int idx = drm_buffer_index(buf);
int page = drm_buffer_page(buf);
void *obj = 0;
void *obj = NULL;
if (idx + objsize <= PAGE_SIZE) {
obj = &buf->data[page][idx];

View File

@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),

View File

@ -2351,14 +2351,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
reg->obj = obj;
if (IS_GEN6(dev))
switch (INTEL_INFO(dev)->gen) {
case 6:
sandybridge_write_fence_reg(reg);
else if (IS_I965G(dev))
break;
case 5:
case 4:
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
break;
case 3:
i915_write_fence_reg(reg);
else
break;
case 2:
i830_write_fence_reg(reg);
break;
}
trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
obj_priv->tiling_mode);
@ -2381,22 +2388,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
uint32_t fence_reg;
if (IS_GEN6(dev)) {
switch (INTEL_INFO(dev)->gen) {
case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
(obj_priv->fence_reg * 8), 0);
} else if (IS_I965G(dev)) {
break;
case 5:
case 4:
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
} else {
uint32_t fence_reg;
if (obj_priv->fence_reg < 8)
fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
break;
case 3:
if (obj_priv->fence_reg >= 8)
fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
else
fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
8) * 4;
case 2:
fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
reg->obj = NULL;

View File

@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv,
struct list_head *unwind)
{
list_add(&obj_priv->evict_list, unwind);
drm_gem_object_reference(&obj_priv->base);
return drm_mm_scan_add_block(obj_priv->gtt_space);
}
@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
list_for_each_entry(obj_priv, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
BUG_ON(ret);
drm_gem_object_unreference(&obj_priv->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@ -181,18 +183,21 @@ found:
* scanning, therefore store to-be-evicted objects on a
* temporary list. */
list_move(&obj_priv->evict_list, &eviction_list);
}
} else
drm_gem_object_unreference(&obj_priv->base);
}
/* Unbinding will emit any required flushes */
list_for_each_entry_safe(obj_priv, tmp_obj_priv,
&eviction_list, evict_list) {
#if WATCH_LRU
DRM_INFO("%s: evicting %p\n", __func__, obj);
DRM_INFO("%s: evicting %p\n", __func__, &obj_priv->base);
#endif
ret = i915_gem_object_unbind(&obj_priv->base);
if (ret)
return ret;
drm_gem_object_unreference(&obj_priv->base);
}
/* The just created free hole should be on the top of the free stack

View File

@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
/* Fences */
if (IS_I965G(dev)) {
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
} else {
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
return 0;
@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
/* Fences */
if (IS_I965G(dev)) {
switch (INTEL_INFO(dev)->gen) {
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
} else {
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
break;
}
i915_restore_display(dev);

View File

@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000, 1))
DRM_ERROR("timed out waiting for FORCE_TRIGGER");
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, temp);
@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0,
1000, 1))
DRM_ERROR("timed out waiting for FORCE_DETECT to go off");
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
}
stat = I915_READ(PORT_HOTPLUG_STAT);

View File

@ -2463,11 +2463,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
return false;
}
/* XXX some encoders set the crtcinfo, others don't.
* Obviously we need some form of conflict resolution here...
*/
if (adjusted_mode->crtc_htotal == 0)
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}

View File

@ -2170,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
intel_sdvo_destroy_enhance_property(connector);
kfree(intel_sdvo_connector);
intel_sdvo_destroy(connector);
return false;
}
@ -2243,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
intel_sdvo_destroy_enhance_property(connector);
kfree(intel_sdvo_connector);
intel_sdvo_destroy(connector);
return false;
}
@ -2522,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
&enhancements, sizeof(enhancements)))
return false;
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
&enhancements, sizeof(enhancements));
if (enhancements.response == 0) {
DRM_DEBUG_KMS("No enhancement is supported\n");
return true;

View File

@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
if (nv_encoder->dcb->type == OUTPUT_LVDS &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
nv_connector->native_mode = drm_mode_create(dev);
nouveau_bios_fp_mode(dev, nv_connector->native_mode);
struct drm_display_mode mode;
nouveau_bios_fp_mode(dev, &mode);
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
/* Find the native mode if this is a digital panel, if we didn't

View File

@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS
#define SW_I2C_CNTL_WRITE1BIT 6
//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV '01.00'
#define VESA_OEM_PRODUCT_REV "01.00"
#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
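
The VESA change above replaces a five-character character constant with the string that was intended; '01.00' is not a string at all. A two-macro illustration (shortened to four characters so the constant still compiles, though most compilers warn):

#include <stdio.h>

/* A multi-character constant is an int with an implementation-defined
 * value; a string literal is actual text. */
#define REV_AS_CHARS	'v1.0'
#define REV_AS_STRING	"v1.0"

int main(void)
{
	printf("char constant: 0x%x\n", (unsigned)REV_AS_CHARS);
	printf("string:        %s\n", REV_AS_STRING);
	return 0;
}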

Some files were not shown because too many files have changed in this diff.