mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-24 04:34:08 +08:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be_main.c
	drivers/net/ethernet/intel/igb/igb_main.c
	drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
	include/net/scm.h
	net/batman-adv/routing.c
	net/ipv4/tcp_input.c

The e{uid,gid} --> {uid,gid} credentials fix conflicted with the
cleanup in net-next to now pass cred structs around.

The be2net driver had a bug fix in 'net' that overlapped with the VLAN
interface changes by Patrick McHardy in net-next.

An IGB conflict existed because in 'net' the build_skb() support was
reverted, and in 'net-next' there was a comment style fix within that
code.

Several batman-adv conflicts were resolved by making sure that all
calls to batadv_is_my_mac() are changed to have a new bat_priv first
argument.

Eric Dumazet's TS ECR fix in TCP in 'net' conflicted with the F-RTO
rewrite in 'net-next', mostly overlapping changes.

Thanks to Stephen Rothwell and Antonio Quartulli for help with several
of these merge resolutions.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6e0895c2ea
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
     <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250.c
+!Edrivers/tty/serial/8250/8250_core.c
     </chapter>

     <chapter id="fbdev">
@@ -596,9 +596,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			is selected automatically. Check
			Documentation/kdump/kdump.txt for further details.

-	crashkernel_low=size[KMG]
-			[KNL, x86] parts under 4G.
-
	crashkernel=range1:size1[,range2:size2,...][@offset]
			[KNL] Same as above, but depends on the memory
			in the running system. The syntax of range is

@@ -606,6 +603,26 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			a memory unit (amount[KMG]). See also
			Documentation/kdump/kdump.txt for an example.

+	crashkernel=size[KMG],high
+			[KNL, x86_64] range could be above 4G. Allow kernel
+			to allocate physical memory region from top, so could
+			be above 4G if system have more than 4G ram installed.
+			Otherwise memory region will be allocated below 4G, if
+			available.
+			It will be ignored if crashkernel=X is specified.
+	crashkernel=size[KMG],low
+			[KNL, x86_64] range under 4G. When crashkernel=X,high
+			is passed, kernel could allocate physical memory region
+			above 4G, that cause second kernel crash on system
+			that require some amount of low memory, e.g. swiotlb
+			requires at least 64M+32K low memory. Kernel would
+			try to allocate 72M below 4G automatically.
+			This one let user to specify own low range under 4G
+			for second kernel instead.
+			0: to disable low allocation.
+			It will be ignored when crashkernel=X,high is not used
+			or memory reserved is below 4G.
+
	cs89x0_dma=	[HW,NET]
			Format: <dma>

@@ -788,6 +805,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
	edd=		[EDD]
			Format: {"off" | "on" | "skip[mbr]"}

+	efi_no_storage_paranoia [EFI; X86]
+			Using this parameter you can use more than 50% of
+			your efi variable storage. Use this parameter only if
+			you are really sure that your UEFI does sane gc and
+			fulfills the spec otherwise your board may brick.
+
	eisa_irq_edge=	[PARISC,HW]
			See header of drivers/parisc/eisa.c.

@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
QLogic Linux FC-FCoE Driver

This program includes a device driver for Linux 3.x.

MAINTAINERS (14 lines changed)
@@ -4941,6 +4941,12 @@ W:	logfs.org
S:	Maintained
F:	fs/logfs/

+LPC32XX MACHINE SUPPORT
+M:	Roland Stigge <stigge@antcom.de>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-lpc32xx/
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M:	Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
M:	Sreekanth Reddy <Sreekanth.Reddy@lsi.com>

@@ -6627,7 +6633,7 @@ S:	Supported
F:	fs/reiserfs/

REGISTER MAP ABSTRACTION
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S:	Supported
F:	drivers/base/regmap/

@@ -7375,7 +7381,7 @@ F:	sound/

SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M:	Liam Girdwood <lgirdwood@gmail.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
W:	http://alsa-project.org/main/index.php/ASoC

@@ -7464,7 +7470,7 @@ F:	drivers/clk/spear/

SPI SUBSYSTEM
M:	Grant Likely <grant.likely@secretlab.ca>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
L:	spi-devel-general@lists.sourceforge.net
Q:	http://patchwork.kernel.org/project/spi-devel-general/list/
T:	git git://git.secretlab.ca/git/linux-2.6.git

@@ -8709,7 +8715,7 @@ F:	drivers/scsi/vmw_pvscsi.h

VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M:	Liam Girdwood <lrg@ti.com>
-M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
+M:	Mark Brown <broonie@kernel.org>
W:	http://opensource.wolfsonmicro.com/node/15
W:	http://www.slimlogic.co.uk/?p=48
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git

Makefile (5 lines changed)
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
NAME = Unicycling Gorilla

# *DOCUMENTATION*

@@ -513,7 +513,8 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
+	 asm-generic
	$(Q)$(MAKE) $(build)=$(@)

# Objects we will link into vmlinux / subdirs we need to visit

@@ -12,7 +12,7 @@ NM := $(NM) -B

LDFLAGS_vmlinux := -static -N #-relax
CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
+cflags-y := -pipe -mno-fp-regs -ffixed-8
cflags-y += $(call cc-option, -fno-jump-tables)

cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
@@ -26,7 +26,7 @@
#define fd_disable_irq()	disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq()	request_irq(FLOPPY_IRQ, floppy_interrupt,\
-					    IRQF_DISABLED, "floppy", NULL)
+					    0, "floppy", NULL)
#define fd_free_irq()		free_irq(FLOPPY_IRQ, NULL)

#ifdef CONFIG_PCI

@@ -117,13 +117,6 @@ handle_irq(int irq)
		return;
	}

-	/*
-	 * From here we must proceed with IPL_MAX. Note that we do not
-	 * explicitly enable interrupts afterwards - some MILO PALcode
-	 * (namely LX164 one) seems to have severe problems with RTI
-	 * at IPL 0.
-	 */
-	local_irq_disable();
	irq_enter();
	generic_handle_irq_desc(irq, desc);
	irq_exit();
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector,
	  unsigned long la_ptr, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
+
+	/*
+	 * Disable interrupts during IRQ handling.
+	 * Note that there is no matching local_irq_enable() due to
+	 * severe problems with RTI at IPL0 and some MILO PALcode
+	 * (namely LX164).
+	 */
+	local_irq_disable();
	switch (type) {
	case 0:
#ifdef CONFIG_SMP

@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector,
	{
		long cpu;

-		local_irq_disable();
		smp_percpu_timer_interrupt(regs);
		cpu = smp_processor_id();
		if (cpu != boot_cpuid) {

@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,

struct irqaction timer_irqaction = {
	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED,
	.name		= "timer",
};
@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
extern void free_reserved_mem(void *, void *);
extern void pcibios_claim_one_bus(struct pci_bus *);

+static struct resource irongate_io = {
+	.name	= "Irongate PCI IO",
+	.flags	= IORESOURCE_IO,
+};
static struct resource irongate_mem = {
	.name	= "Irongate PCI MEM",
	.flags	= IORESOURCE_MEM,

@@ -209,6 +213,7 @@ nautilus_init_pci(void)

	irongate = pci_get_bus_and_slot(0, 0);
	bus->self = irongate;
+	bus->resource[0] = &irongate_io;
	bus->resource[1] = &irongate_mem;

	pci_bus_size_bridges(bus);
@@ -280,15 +280,15 @@ titan_late_init(void)
	 * all reported to the kernel as machine checks, so the handler
	 * is a nop so it can be called to count the individual events.
	 */
-	titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(63+16, titan_intr_nop, 0,
		    "CChip Error", NULL);
-	titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(62+16, titan_intr_nop, 0,
		    "PChip 0 H_Error", NULL);
-	titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(61+16, titan_intr_nop, 0,
		    "PChip 1 H_Error", NULL);
-	titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(60+16, titan_intr_nop, 0,
		    "PChip 0 C_Error", NULL);
-	titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(59+16, titan_intr_nop, 0,
		    "PChip 1 C_Error", NULL);

	/*

@@ -348,9 +348,9 @@ privateer_init_pci(void)
	 * Hook a couple of extra err interrupts that the
	 * common titan code won't.
	 */
-	titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(53+16, titan_intr_nop, 0,
		    "NMI", NULL);
-	titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
+	titan_request_irq(50+16, titan_intr_nop, 0,
		    "Temperature Warning", NULL);

	/*
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
	"	flag.nz %0		\n"
	: "=r"(temp), "=r"(flags)
	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
-	: "cc");
+	: "memory", "cc");

	return flags;
}

@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
	__asm__ __volatile__(
	"	flag %0			\n"
	:
-	: "r"(flags));
+	: "r"(flags)
+	: "memory");
}

/*

@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void)
	"	and %0, %0, %1		\n"
	"	flag %0			\n"
	: "=&r"(temp)
-	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
+	: "memory");
}

/*

@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void)

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
-	: "=&r"(temp));
+	: "=&r"(temp)
+	:
+	: "memory");

	return temp;
}
@@ -152,7 +152,6 @@
		i2c0: i2c@80058000 {
			pinctrl-names = "default";
			pinctrl-0 = <&i2c0_pins_a>;
-			clock-frequency = <400000>;
			status = "okay";

			sgtl5000: codec@0a {

@@ -70,7 +70,6 @@
		i2c0: i2c@80058000 {
			pinctrl-names = "default";
			pinctrl-0 = <&i2c0_pins_a>;
-			clock-frequency = <400000>;
			status = "okay";

			rtc: rtc@51 {

@@ -91,6 +91,7 @@
			compatible = "arm,cortex-a9-twd-timer";
			reg = <0x00a00600 0x20>;
			interrupts = <1 13 0xf01>;
+			clocks = <&clks 15>;
		};

		L2: l2-cache@00a02000 {
@@ -96,11 +96,11 @@
			marvell,function = "gpio";
		};
		pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
-			marvell,pins = "mpp44";
+			marvell,pins = "mpp46";
			marvell,function = "gpio";
		};
		pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
-			marvell,pins = "mpp45";
+			marvell,pins = "mpp47";
			marvell,function = "gpio";
		};

@@ -157,14 +157,14 @@
			gpios = <&gpio0 16 0>;
			linux,default-trigger = "default-on";
		};
-		health_led1 {
+		rebuild_led {
			label = "status:white:rebuild_led";
			gpios = <&gpio1 4 0>;
		};
-		health_led2 {
-			label = "status:white:health_led";
-			gpios = <&gpio1 4 0>;
-		};
+		health_led {
+			label = "status:red:health_led";
+			gpios = <&gpio1 5 0>;
+		};
		backup_led {
			label = "status:blue:backup_led";
			gpios = <&gpio0 15 0>;
@@ -19,14 +19,6 @@
#undef _CACHE
#undef MULTI_CACHE

-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1

@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
 * IOP3XX processor registers
 */
#define IOP3XX_PERIPHERAL_PHYS_BASE	0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfeffe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE	0xfedfe000
#define IOP3XX_PERIPHERAL_SIZE		0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
					IOP3XX_PERIPHERAL_SIZE - 1)
|
||||
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
|
||||
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
|
||||
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
|
||||
#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
|
||||
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
/*
|
||||
* Hyp-mode PL2 PTE definitions for LPAE.
|
||||
|
@ -14,7 +14,6 @@
|
||||
|
||||
#include <asm/glue.h>
|
||||
|
||||
#define TLB_V3_PAGE (1 << 0)
|
||||
#define TLB_V4_U_PAGE (1 << 1)
|
||||
#define TLB_V4_D_PAGE (1 << 2)
|
||||
#define TLB_V4_I_PAGE (1 << 3)
|
||||
@ -22,7 +21,6 @@
|
||||
#define TLB_V6_D_PAGE (1 << 5)
|
||||
#define TLB_V6_I_PAGE (1 << 6)
|
||||
|
||||
#define TLB_V3_FULL (1 << 8)
|
||||
#define TLB_V4_U_FULL (1 << 9)
|
||||
#define TLB_V4_D_FULL (1 << 10)
|
||||
#define TLB_V4_I_FULL (1 << 11)
|
||||
@ -52,7 +50,6 @@
|
||||
* =============
|
||||
*
|
||||
* We have the following to choose from:
|
||||
* v3 - ARMv3
|
||||
* v4 - ARMv4 without write buffer
|
||||
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
|
||||
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
|
||||
@ -330,7 +327,6 @@ static inline void local_flush_tlb_all(void)
|
||||
if (tlb_flag(TLB_WB))
|
||||
dsb();
|
||||
|
||||
tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
|
||||
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
|
||||
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
|
||||
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
|
||||
@ -351,9 +347,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
|
||||
if (tlb_flag(TLB_WB))
|
||||
dsb();
|
||||
|
||||
if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
|
||||
if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
|
||||
if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
|
||||
tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
|
||||
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
|
||||
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
|
||||
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
|
||||
@ -385,9 +380,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
|
||||
if (tlb_flag(TLB_WB))
|
||||
dsb();
|
||||
|
||||
if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
|
||||
if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
|
||||
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
|
||||
tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
|
||||
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
|
||||
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
|
||||
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
|
||||
@ -418,7 +412,6 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
|
||||
if (tlb_flag(TLB_WB))
|
||||
dsb();
|
||||
|
||||
tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
|
||||
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
|
||||
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
|
||||
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
|
||||
|
@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
|
||||
static struct notifier_block dbg_cpu_pm_nb = {
|
||||
.notifier_call = dbg_cpu_pm_notify,
|
||||
};
|
||||
|
||||
|
@ -253,7 +253,10 @@ validate_event(struct pmu_hw_events *hw_events,
|
||||
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
|
||||
struct pmu *leader_pmu = event->group_leader->pmu;
|
||||
|
||||
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
|
||||
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
|
||||
return 1;
|
||||
|
||||
if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
|
||||
return 1;
|
||||
|
||||
return armpmu->get_event_idx(hw_events, event) >= 0;
|
||||
|
@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
|
||||
|
||||
static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
|
||||
|
||||
static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
|
||||
static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
|
||||
{
|
||||
return (cyc * mult) >> shift;
|
||||
}
|
||||
|
||||
static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
|
||||
static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
|
||||
{
|
||||
u64 epoch_ns;
|
||||
u32 epoch_cyc;
|
||||
|
@ -56,7 +56,6 @@
|
||||
#include <asm/virt.h>
|
||||
|
||||
#include "atags.h"
|
||||
#include "tcm.h"
|
||||
|
||||
|
||||
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
|
||||
@ -798,8 +797,6 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
reserve_crashkernel();
|
||||
|
||||
tcm_init();
|
||||
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
handle_arch_irq = mdesc->handle_irq;
|
||||
#endif
|
||||
|
@ -17,7 +17,6 @@
|
||||
#include <asm/mach/map.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/system_info.h>
|
||||
#include "tcm.h"
|
||||
|
||||
static struct gen_pool *tcm_pool;
|
||||
static bool dtcm_present;
|
||||
|
@ -201,6 +201,7 @@ int kvm_dev_ioctl_check_extension(long ext)
|
||||
break;
|
||||
case KVM_CAP_ARM_SET_DEVICE_ADDR:
|
||||
r = 1;
|
||||
break;
|
||||
case KVM_CAP_NR_VCPUS:
|
||||
r = num_online_cpus();
|
||||
break;
|
||||
|
@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
|
||||
u32 val;
|
||||
int cpu;
|
||||
|
||||
cpu = get_cpu();
|
||||
|
||||
if (!p->is_write)
|
||||
return read_from_write_only(vcpu, p);
|
||||
|
||||
cpu = get_cpu();
|
||||
|
||||
cpumask_setall(&vcpu->arch.require_dcache_flush);
|
||||
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
|
||||
|
||||
|
@ -28,13 +28,11 @@ extern void secondary_startup(void);
|
||||
*/
|
||||
void __ref highbank_cpu_die(unsigned int cpu)
|
||||
{
|
||||
flush_cache_all();
|
||||
|
||||
highbank_set_cpu_jump(cpu, phys_to_virt(0));
|
||||
|
||||
flush_cache_louis();
|
||||
highbank_set_core_pwr();
|
||||
|
||||
cpu_do_idle();
|
||||
|
||||
/* We should never return from idle */
|
||||
panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
|
||||
while (1)
|
||||
cpu_do_idle();
|
||||
}
|
||||
|
@ -257,6 +257,7 @@ int __init mx35_clocks_init(void)
|
||||
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
|
||||
clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
|
||||
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
|
||||
clk_register_clkdev(clk[admux_gate], "audmux", NULL);
|
||||
|
||||
clk_prepare_enable(clk[spba_gate]);
|
||||
clk_prepare_enable(clk[gpio1_gate]);
|
||||
@ -265,6 +266,7 @@ int __init mx35_clocks_init(void)
|
||||
clk_prepare_enable(clk[iim_gate]);
|
||||
clk_prepare_enable(clk[emi_gate]);
|
||||
clk_prepare_enable(clk[max_gate]);
|
||||
clk_prepare_enable(clk[iomuxc_gate]);
|
||||
|
||||
/*
|
||||
* SCC is needed to boot via mmc after a watchdog reset. The clock code
|
||||
|
@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
|
||||
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
|
||||
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
|
||||
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
|
||||
static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
|
||||
static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
|
||||
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
|
||||
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
|
||||
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
|
||||
@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void)
|
||||
|
||||
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
|
||||
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
|
||||
clk_register_clkdev(clk[twd], NULL, "smp_twd");
|
||||
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
|
||||
clk_register_clkdev(clk[ahb], "ahb", NULL);
|
||||
clk_register_clkdev(clk[cko1], "cko1", NULL);
|
||||
|
@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
|
||||
.duplex = DUPLEX_FULL,
|
||||
};
|
||||
|
||||
static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
|
||||
.phy_addr = MV643XX_ETH_PHY_ADDR(11),
|
||||
};
|
||||
|
||||
void __init iomega_ix2_200_init(void)
|
||||
{
|
||||
/*
|
||||
* Basic setup. Needs to be called early.
|
||||
*/
|
||||
kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
|
||||
kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
|
||||
kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
|
||||
}
|
||||
|
@ -61,7 +61,6 @@ static struct irq_domain *armada_370_xp_mpic_domain;
|
||||
*/
|
||||
static void armada_370_xp_irq_mask(struct irq_data *d)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
@ -70,15 +69,10 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
|
||||
else
|
||||
writel(hwirq, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_SET_MASK_OFFS);
|
||||
#else
|
||||
writel(irqd_to_hwirq(d),
|
||||
per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void armada_370_xp_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
|
||||
if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
@ -87,10 +81,6 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
|
||||
else
|
||||
writel(hwirq, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
#else
|
||||
writel(irqd_to_hwirq(d),
|
||||
per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
@ -146,7 +136,11 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
|
||||
unsigned int virq, irq_hw_number_t hw)
|
||||
{
|
||||
armada_370_xp_irq_mask(irq_get_irq_data(virq));
|
||||
writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
|
||||
writel(hw, per_cpu_int_base +
|
||||
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
|
||||
else
|
||||
writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
|
||||
if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
|
||||
|
@ -188,10 +188,8 @@
|
||||
|
||||
#if defined(CONFIG_CPU_S3C2416)
|
||||
#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
|
||||
#elif defined(CONFIG_CPU_S3C2443)
|
||||
#define NR_IRQS (IRQ_S3C2443_AC97+1)
|
||||
#else
|
||||
#define NR_IRQS (IRQ_S3C2440_AC97+1)
|
||||
#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
|
||||
#endif
|
||||
|
||||
/* compatibility define. */
|
||||
|
@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
|
||||
base = (void *)0xfd000000;
|
||||
|
||||
intc->reg_mask = base + 0xa4;
|
||||
intc->reg_pending = base + 0x08;
|
||||
intc->reg_pending = base + 0xa8;
|
||||
irq_num = 20;
|
||||
irq_start = S3C2410_IRQ(32);
|
||||
irq_offset = 4;
|
||||
|
@@ -43,7 +43,7 @@ config CPU_ARM740T
	depends on !MMU
	select CPU_32v4T
	select CPU_ABRT_LV4T
-	select CPU_CACHE_V3	# although the core is v4t
+	select CPU_CACHE_V4
	select CPU_CP15_MPU
	select CPU_PABRT_LEGACY
	help

@@ -469,9 +469,6 @@ config CPU_PABRT_V7
	bool

# The cache model
-config CPU_CACHE_V3
-	bool
-
config CPU_CACHE_V4
	bool

@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY)	+= pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o

-obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o

@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;
+	outer_cache.inv_all = l2_inv_all;

	enable_l2();
@@ -1,137 +0,0 @@
-/*
- *  linux/arch/arm/mm/cache-v3.S
- *
- *  Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- *	flush_icache_all()
- *
- *	Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
-	mov	pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- *	flush_user_cache_all()
- *
- *	Invalidate all cache entries in a particular address
- *	space.
- *
- *	- mm	- mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
-	/* FALLTHROUGH */
-/*
- *	flush_kern_cache_all()
- *
- *	Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
-	/* FALLTHROUGH */
-
-/*
- *	flush_user_cache_range(start, end, flags)
- *
- *	Invalidate a range of cache entries in the specified
- *	address space.
- *
- *	- start - start address (may not be aligned)
- *	- end	- end address (exclusive, may not be aligned)
- *	- flags	- vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
-	mov	ip, #0
-	mcreq	p15, 0, ip, c7, c0, 0		@ flush ID cache
-	mov	pc, lr
-
-/*
- *	coherent_kern_range(start, end)
- *
- *	Ensure coherency between the Icache and the Dcache in the
- *	region described by start.  If you have non-snooping
- *	Harvard caches, you need to implement this function.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
-	/* FALLTHROUGH */
-
-/*
- *	coherent_user_range(start, end)
- *
- *	Ensure coherency between the Icache and the Dcache in the
- *	region described by start.  If you have non-snooping
- *	Harvard caches, you need to implement this function.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v3_coherent_user_range)
-	mov	r0, #0
-	mov	pc, lr
-
-/*
- *	flush_kern_dcache_area(void *page, size_t size)
- *
- *	Ensure no D cache aliasing occurs, either with itself or
- *	the I cache
- *
- *	- addr	- kernel address
- *	- size	- region size
- */
-ENTRY(v3_flush_kern_dcache_area)
-	/* FALLTHROUGH */
-
-/*
- *	dma_flush_range(start, end)
- *
- *	Clean and invalidate the specified virtual address range.
- *
- *	- start  - virtual start address
- *	- end	 - virtual end address
- */
-ENTRY(v3_dma_flush_range)
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
-	mov	pc, lr
-
-/*
- *	dma_unmap_area(start, size, dir)
- *	- start	- kernel virtual start address
- *	- size	- size of region
- *	- dir	- DMA direction
- */
-ENTRY(v3_dma_unmap_area)
-	teq	r2, #DMA_TO_DEVICE
-	bne	v3_dma_flush_range
-	/* FALLTHROUGH */
-
-/*
- *	dma_map_area(start, size, dir)
- *	- start	- kernel virtual start address
- *	- size	- size of region
- *	- dir	- DMA direction
- */
-ENTRY(v3_dma_map_area)
-	mov	pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
-	.globl	v3_flush_kern_cache_louis
-	.equ	v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
-	__INITDATA
-
-	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
-	define_cache_functions v3
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
	mov	ip, #0
-	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
+	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	mov	pc, lr
#else
	/* FALLTHROUGH */

@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>

#include "mm.h"
+#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for

@@ -1277,6 +1278,7 @@ void __init paging_init(struct machine_desc *mdesc)
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();
+	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);
|
||||
mcr p15, 0, r0, c6, c0 @ set area 0, default
|
||||
|
||||
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
|
||||
ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
|
||||
mov r2, #10 @ 11 is the minimum (4KB)
|
||||
1: add r2, r2, #1 @ area size *= 2
|
||||
mov r1, r1, lsr #1
|
||||
ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
|
||||
mov r4, #10 @ 11 is the minimum (4KB)
|
||||
1: add r4, r4, #1 @ area size *= 2
|
||||
movs r3, r3, lsr #1
|
||||
bne 1b @ count not zero r-shift
|
||||
orr r0, r0, r2, lsl #1 @ the area register value
|
||||
orr r0, r0, r4, lsl #1 @ the area register value
|
||||
orr r0, r0, #1 @ set enable bit
|
||||
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
|
||||
|
||||
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
|
||||
ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
|
||||
mov r2, #10 @ 11 is the minimum (4KB)
|
||||
1: add r2, r2, #1 @ area size *= 2
|
||||
mov r1, r1, lsr #1
|
||||
ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
|
||||
cmp r3, #0
|
||||
moveq r0, #0
|
||||
beq 2f
|
||||
mov r4, #10 @ 11 is the minimum (4KB)
|
||||
1: add r4, r4, #1 @ area size *= 2
|
||||
movs r3, r3, lsr #1
|
||||
bne 1b @ count not zero r-shift
|
||||
orr r0, r0, r2, lsl #1 @ the area register value
|
||||
orr r0, r0, r4, lsl #1 @ the area register value
|
||||
orr r0, r0, #1 @ set enable bit
|
||||
mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
|
||||
2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
|
||||
|
||||
mov r0, #0x06
|
||||
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
|
||||
@ -137,13 +140,14 @@ __arm740_proc_info:
|
||||
.long 0x41807400
|
||||
.long 0xfffffff0
|
||||
.long 0
|
||||
.long 0
|
||||
b __arm740_setup
|
||||
.long cpu_arch_name
|
||||
.long cpu_elf_name
|
||||
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
|
||||
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
|
||||
.long cpu_arm740_name
|
||||
.long arm740_processor_functions
|
||||
.long 0
|
||||
.long 0
|
||||
.long v3_cache_fns @ cache model
|
||||
.long v4_cache_fns @ cache model
|
||||
.size __arm740_proc_info, . - __arm740_proc_info
|
||||
|
@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
|
||||
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
||||
.globl cpu_arm920_suspend_size
|
||||
.equ cpu_arm920_suspend_size, 4 * 3
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_arm920_do_suspend)
|
||||
stmfd sp!, {r4 - r6, lr}
|
||||
mrc p15, 0, r4, c13, c0, 0 @ PID
|
||||
|
@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
|
||||
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
|
||||
.globl cpu_arm926_suspend_size
|
||||
.equ cpu_arm926_suspend_size, 4 * 3
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_arm926_do_suspend)
|
||||
stmfd sp!, {r4 - r6, lr}
|
||||
mrc p15, 0, r4, c13, c0, 0 @ PID
|
||||
|
@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
|
||||
|
||||
.globl cpu_mohawk_suspend_size
|
||||
.equ cpu_mohawk_suspend_size, 4 * 6
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_mohawk_do_suspend)
|
||||
stmfd sp!, {r4 - r9, lr}
|
||||
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
|
||||
|
@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
|
||||
|
||||
.globl cpu_sa1100_suspend_size
|
||||
.equ cpu_sa1100_suspend_size, 4 * 3
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_sa1100_do_suspend)
|
||||
stmfd sp!, {r4 - r6, lr}
|
||||
mrc p15, 0, r4, c3, c0, 0 @ domain ID
|
||||
|
@ -17,7 +17,9 @@
|
||||
|
||||
#ifndef MULTI_CPU
|
||||
EXPORT_SYMBOL(cpu_dcache_clean_area);
|
||||
#ifdef CONFIG_MMU
|
||||
EXPORT_SYMBOL(cpu_set_pte_ext);
|
||||
#endif
|
||||
#else
|
||||
EXPORT_SYMBOL(processor);
|
||||
#endif
|
||||
|
@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
|
||||
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
|
||||
.globl cpu_v6_suspend_size
|
||||
.equ cpu_v6_suspend_size, 4 * 6
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_v6_do_suspend)
|
||||
stmfd sp!, {r4 - r9, lr}
|
||||
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
|
||||
|
@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
|
||||
|
||||
.globl cpu_xsc3_suspend_size
|
||||
.equ cpu_xsc3_suspend_size, 4 * 6
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_xsc3_do_suspend)
|
||||
stmfd sp!, {r4 - r9, lr}
|
||||
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
|
||||
|
@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
|
||||
|
||||
.globl cpu_xscale_suspend_size
|
||||
.equ cpu_xscale_suspend_size, 4 * 6
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#ifdef CONFIG_ARM_CPU_SUSPEND
|
||||
ENTRY(cpu_xscale_do_suspend)
|
||||
stmfd sp!, {r4 - r9, lr}
|
||||
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
|
||||
|
@ -165,6 +165,10 @@ BUILDIO_IOPORT(l, u32)
|
||||
#define readw_be __raw_readw
|
||||
#define readl_be __raw_readl
|
||||
|
||||
#define writeb_relaxed writeb
|
||||
#define writew_relaxed writew
|
||||
#define writel_relaxed writel
|
||||
|
||||
#define writeb_be __raw_writeb
|
||||
#define writew_be __raw_writew
|
||||
#define writel_be __raw_writel
|
||||
|
@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
|
||||
/* set interrupt enabled status */
|
||||
static inline void arch_local_irq_restore(unsigned long flags)
|
||||
{
|
||||
asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
|
||||
asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
|
||||
}
|
||||
|
||||
/* unconditionally enable interrupts */
|
||||
|
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={

#define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries)

-/*
- * this array is used to keep track of the proc entries we create. This is
- * required in the module mode when we need to remove all entries. The procfs code
- * does not do recursion of deletion
- *
- * Notes:
- *	- +1 accounts for the cpuN directory entry in /proc/pal
- */
-#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1))
-
-static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;

/*

@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
-#	define CPUSTR	"cpu%d"
-
	pal_func_cpu_u_t f;
-	struct proc_dir_entry **pdir;
	struct proc_dir_entry *cpu_dir;
	int j;
-	char cpustr[sizeof(CPUSTR)];
-
-
-	/*
-	 * we keep track of created entries in a depth-first order for
-	 * cleanup purposes. Each entry is stored into palinfo_proc_entries
-	 */
-	sprintf(cpustr,CPUSTR, cpu);
+	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */
+
+	sprintf(cpustr, "cpu%d", cpu);

	cpu_dir = proc_mkdir(cpustr, palinfo_dir);
+	if (!cpu_dir)
+		return;

	f.req_cpu = cpu;

-	/*
-	 * Compute the location to store per cpu entries
-	 * We dont store the top level entry in this list, but
-	 * remove it finally after removing all cpu entries.
-	 */
-	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
-	*pdir++ = cpu_dir;
	for (j=0; j < NR_PALINFO_ENTRIES; j++) {
		f.func_id = j;
-		*pdir = create_proc_read_entry(
-				palinfo_entries[j].name, 0, cpu_dir,
-				palinfo_read_entry, (void *)f.value);
-		pdir++;
+		create_proc_read_entry(
+			palinfo_entries[j].name, 0, cpu_dir,
+			palinfo_read_entry, (void *)f.value);
	}
}

static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
-	int j;
-	struct proc_dir_entry *cpu_dir, **pdir;
-
-	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
-	cpu_dir = *pdir;
-	*pdir++=NULL;
-	for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
-		if ((*pdir)) {
-			remove_proc_entry ((*pdir)->name, cpu_dir);
-			*pdir ++= NULL;
-		}
-	}
-
-	if (cpu_dir) {
-		remove_proc_entry(cpu_dir->name, palinfo_dir);
-	}
+	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */
+	sprintf(cpustr, "cpu%d", hcpu);
+	remove_proc_subtree(cpustr, palinfo_dir);
}

static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,

@@ -1058,6 +1019,8 @@ palinfo_init(void)

	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
	palinfo_dir = proc_mkdir("pal", NULL);
+	if (!palinfo_dir)
+		return -ENOMEM;

	/* Create palinfo dirs in /proc for all online cpus */
	for_each_online_cpu(i) {

@@ -1073,22 +1036,8 @@ palinfo_init(void)
static void __exit
palinfo_exit(void)
{
-	int i = 0;
-
-	/* remove all nodes: depth first pass. Could optimize this */
-	for_each_online_cpu(i) {
-		remove_palinfo_proc_entries(i);
-	}
-
-	/*
-	 * Remove the top level entry finally
-	 */
-	remove_proc_entry(palinfo_dir->name, NULL);
-
-	/*
-	 * Unregister from cpu notifier callbacks
-	 */
	unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+	remove_proc_subtree("pal", NULL);
}

module_init(palinfo_init);
|
||||
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
|
||||
}
|
||||
|
||||
static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = gpio_request(gpio, label);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (flags & GPIOF_DIR_IN)
|
||||
err = gpio_direction_input(gpio);
|
||||
else
|
||||
err = gpio_direction_output(gpio,
|
||||
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
|
||||
|
||||
if (err)
|
||||
gpio_free(gpio);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -304,7 +304,7 @@ syscall_exit_work:
|
||||
subi r12,r12,TI_FLAGS
|
||||
|
||||
4: /* Anything else left to do? */
|
||||
SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
|
||||
SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
|
||||
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
|
||||
beq .ret_from_except_lite
|
||||
|
||||
@ -657,7 +657,7 @@ resume_kernel:
|
||||
/* Clear _TIF_EMULATE_STACK_STORE flag */
|
||||
lis r11,_TIF_EMULATE_STACK_STORE@h
|
||||
addi r5,r9,TI_FLAGS
|
||||
ldarx r4,0,r5
|
||||
0: ldarx r4,0,r5
|
||||
andc r4,r4,r11
|
||||
stdcx. r4,0,r5
|
||||
bne- 0b
|
||||
|
@ -555,10 +555,12 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
|
||||
new->thread.regs->msr |=
|
||||
(MSR_FP | new->thread.fpexc_mode);
|
||||
}
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
if (msr & MSR_VEC) {
|
||||
do_load_up_transact_altivec(&new->thread);
|
||||
new->thread.regs->msr |= MSR_VEC;
|
||||
}
|
||||
#endif
|
||||
/* We may as well turn on VSX too since all the state is restored now */
|
||||
if (msr & MSR_VSX)
|
||||
new->thread.regs->msr |= MSR_VSX;
|
||||
|
@ -866,10 +866,12 @@ static long restore_tm_user_regs(struct pt_regs *regs,
|
||||
do_load_up_transact_fpu(¤t->thread);
|
||||
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
|
||||
}
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
if (msr & MSR_VEC) {
|
||||
do_load_up_transact_altivec(¤t->thread);
|
||||
regs->msr |= MSR_VEC;
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -522,10 +522,12 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
|
||||
do_load_up_transact_fpu(¤t->thread);
|
||||
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
|
||||
}
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
if (msr & MSR_VEC) {
|
||||
do_load_up_transact_altivec(¤t->thread);
|
||||
regs->msr |= MSR_VEC;
|
||||
}
|
||||
#endif
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -309,6 +309,7 @@ _GLOBAL(tm_recheckpoint)
|
||||
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
|
||||
mtmsr r5
|
||||
|
||||
#ifdef CONFIG_ALTIVEC
|
||||
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
|
||||
* and thread.vr[] respectively. The thread.transact_fpr[] version
|
||||
* is more modern, and will be loaded subsequently by any FPUnavailable
|
||||
@ -323,6 +324,7 @@ _GLOBAL(tm_recheckpoint)
|
||||
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
|
||||
ld r5, THREAD_VRSAVE(r3)
|
||||
mtspr SPRN_VRSAVE, r5
|
||||
#endif
|
||||
|
||||
dont_restore_vec:
|
||||
andi. r0, r4, MSR_FP
|
||||
|
@@ -26,17 +26,20 @@
#define E500_PID_NUM   3
#define E500_TLB_NUM   2

-#define E500_TLB_VALID 1
-#define E500_TLB_BITMAP 2
+/* entry is mapped somewhere in host TLB */
+#define E500_TLB_VALID		(1 << 0)
+/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
+#define E500_TLB_BITMAP		(1 << 1)
+/* TLB1 entry is mapped by host TLB0 */
+#define E500_TLB_TLB0		(1 << 2)

struct tlbe_ref {
-	pfn_t pfn;
-	unsigned int flags; /* E500_TLB_* */
+	pfn_t pfn;		/* valid only for TLB0, except briefly */
+	unsigned int flags;	/* E500_TLB_* */
};

struct tlbe_priv {
-	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
+	struct tlbe_ref ref;
};

#ifdef CONFIG_KVM_E500V2

@@ -63,17 +66,6 @@ struct kvmppc_vcpu_e500 {

	unsigned int gtlb_nv[E500_TLB_NUM];

-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now. If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv;

	u32 svr;
|
||||
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
|
||||
|
||||
/* Don't bother with unmapped entries */
|
||||
if (!(ref->flags & E500_TLB_VALID))
|
||||
return;
|
||||
if (!(ref->flags & E500_TLB_VALID)) {
|
||||
WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
|
||||
"%s: flags %x\n", __func__, ref->flags);
|
||||
WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
|
||||
}
|
||||
|
||||
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
|
||||
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
|
||||
@ -248,7 +251,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
|
||||
pfn_t pfn)
|
||||
{
|
||||
ref->pfn = pfn;
|
||||
ref->flags = E500_TLB_VALID;
|
||||
ref->flags |= E500_TLB_VALID;
|
||||
|
||||
if (tlbe_is_writable(gtlbe))
|
||||
kvm_set_pfn_dirty(pfn);
|
||||
@ -257,6 +260,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
|
||||
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
|
||||
{
|
||||
if (ref->flags & E500_TLB_VALID) {
|
||||
/* FIXME: don't log bogus pfn for TLB1 */
|
||||
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
|
||||
ref->flags = 0;
|
||||
}
|
||||
@ -274,36 +278,23 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
|
||||
|
||||
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
|
||||
{
|
||||
int tlbsel = 0;
|
||||
int tlbsel;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
|
||||
struct tlbe_ref *ref =
|
||||
&vcpu_e500->gtlb_priv[tlbsel][i].ref;
|
||||
kvmppc_e500_ref_release(ref);
|
||||
for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
|
||||
for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
|
||||
struct tlbe_ref *ref =
|
||||
&vcpu_e500->gtlb_priv[tlbsel][i].ref;
|
||||
kvmppc_e500_ref_release(ref);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
|
||||
{
|
||||
int stlbsel = 1;
|
||||
int i;
|
||||
|
||||
kvmppc_e500_tlbil_all(vcpu_e500);
|
||||
|
||||
for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
|
||||
struct tlbe_ref *ref =
|
||||
&vcpu_e500->tlb_refs[stlbsel][i];
|
||||
kvmppc_e500_ref_release(ref);
|
||||
}
|
||||
|
||||
clear_tlb_privs(vcpu_e500);
|
||||
}
|
||||
|
||||
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
||||
clear_tlb_refs(vcpu_e500);
|
||||
kvmppc_e500_tlbil_all(vcpu_e500);
|
||||
clear_tlb_privs(vcpu_e500);
|
||||
clear_tlb1_bitmap(vcpu_e500);
|
||||
}
|
||||
|
||||
@ -458,8 +449,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
|
||||
}
|
||||
|
||||
/* Drop old ref and setup new one. */
|
||||
kvmppc_e500_ref_release(ref);
|
||||
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
|
||||
|
||||
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
|
||||
@ -507,14 +496,15 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
|
||||
vcpu_e500->host_tlb1_nv = 0;
|
||||
|
||||
vcpu_e500->tlb_refs[1][sesel] = *ref;
|
||||
vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
|
||||
vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
|
||||
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
|
||||
unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
|
||||
unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
|
||||
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
|
||||
}
|
||||
vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
|
||||
|
||||
vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
|
||||
vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
|
||||
vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
|
||||
WARN_ON(!(ref->flags & E500_TLB_VALID));
|
||||
|
||||
return sesel;
|
||||
}
|
||||
@ -526,13 +516,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
|
||||
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
|
||||
{
|
||||
struct tlbe_ref ref;
|
||||
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
|
||||
int sesel;
|
||||
int r;
|
||||
|
||||
ref.flags = 0;
|
||||
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
|
||||
&ref);
|
||||
ref);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -544,7 +533,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
||||
}
|
||||
|
||||
/* Otherwise map into TLB1 */
|
||||
sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
|
||||
sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
|
||||
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
|
||||
|
||||
return 0;
|
||||
@ -565,7 +554,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
|
||||
case 0:
|
||||
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
|
||||
|
||||
/* Triggers after clear_tlb_refs or on initial mapping */
|
||||
/* Triggers after clear_tlb_privs or on initial mapping */
|
||||
if (!(priv->ref.flags & E500_TLB_VALID)) {
|
||||
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
|
||||
} else {
|
||||
@ -665,35 +654,16 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
|
||||
host_tlb_params[0].entries / host_tlb_params[0].ways;
|
||||
host_tlb_params[1].sets = 1;
|
||||
|
||||
vcpu_e500->tlb_refs[0] =
|
||||
kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
|
||||
GFP_KERNEL);
|
||||
if (!vcpu_e500->tlb_refs[0])
|
||||
goto err;
|
||||
|
||||
vcpu_e500->tlb_refs[1] =
|
||||
kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
|
||||
GFP_KERNEL);
|
||||
if (!vcpu_e500->tlb_refs[1])
|
||||
goto err;
|
||||
|
||||
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
|
||||
host_tlb_params[1].entries,
|
||||
GFP_KERNEL);
|
||||
if (!vcpu_e500->h2g_tlb1_rmap)
|
||||
goto err;
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
kfree(vcpu_e500->tlb_refs[0]);
|
||||
kfree(vcpu_e500->tlb_refs[1]);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
|
||||
{
|
||||
kfree(vcpu_e500->h2g_tlb1_rmap);
|
||||
kfree(vcpu_e500->tlb_refs[0]);
|
||||
kfree(vcpu_e500->tlb_refs[1]);
|
||||
}
|
||||
|
@ -108,6 +108,8 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
|
||||
{
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
|
||||
|
||||
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
||||
@ -136,8 +138,11 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
|
||||
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
|
||||
|
||||
if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
|
||||
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
|
||||
__get_cpu_var(last_vcpu_on_cpu) != vcpu) {
|
||||
kvmppc_e500_tlbil_all(vcpu_e500);
|
||||
__get_cpu_var(last_vcpu_on_cpu) = vcpu;
|
||||
}
|
||||
|
||||
kvmppc_load_guest_fp(vcpu);
|
||||
}
|
||||
|
@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
|
||||
(0x1UL << 4), &dummy1, &dummy2);
|
||||
if (lpar_rc == H_SUCCESS)
|
||||
return i;
|
||||
BUG_ON(lpar_rc != H_NOT_FOUND);
|
||||
|
||||
/*
|
||||
* The test for adjunct partition is performed before the
|
||||
* ANDCOND test. H_RESOURCE may be returned, so we need to
|
||||
* check for that as well.
|
||||
*/
|
||||
BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
|
||||
|
||||
slot_offset++;
|
||||
slot_offset &= 0x7;
|
||||
|
@ -50,10 +50,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
|
||||
#define ioremap_nocache(addr, size) ioremap(addr, size)
|
||||
#define ioremap_wc ioremap_nocache
|
||||
|
||||
/* TODO: s390 cannot support io_remap_pfn_range... */
|
||||
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
|
||||
remap_pfn_range(vma, vaddr, pfn, size, prot)
|
||||
|
||||
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
|
||||
{
|
||||
return (void __iomem *) offset;
|
||||
|
@ -57,6 +57,10 @@ extern unsigned long zero_page_mask;
|
||||
(((unsigned long)(vaddr)) &zero_page_mask))))
|
||||
#define __HAVE_COLOR_ZERO_PAGE
|
||||
|
||||
/* TODO: s390 cannot support io_remap_pfn_range... */
|
||||
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
|
||||
remap_pfn_range(vma, vaddr, pfn, size, prot)
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
|
@ -2,11 +2,16 @@
|
||||
|
||||
|
||||
generic-y += clkdev.h
|
||||
generic-y += cputime.h
|
||||
generic-y += div64.h
|
||||
generic-y += emergency-restart.h
|
||||
generic-y += exec.h
|
||||
generic-y += local64.h
|
||||
generic-y += mutex.h
|
||||
generic-y += irq_regs.h
|
||||
generic-y += local.h
|
||||
generic-y += module.h
|
||||
generic-y += serial.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += types.h
|
||||
generic-y += word-at-a-time.h
|
||||
|
@ -1,6 +0,0 @@
|
||||
#ifndef __SPARC_CPUTIME_H
|
||||
#define __SPARC_CPUTIME_H
|
||||
|
||||
#include <asm-generic/cputime.h>
|
||||
|
||||
#endif /* __SPARC_CPUTIME_H */
|
@ -1,6 +0,0 @@
|
||||
#ifndef _ASM_EMERGENCY_RESTART_H
|
||||
#define _ASM_EMERGENCY_RESTART_H
|
||||
|
||||
#include <asm-generic/emergency-restart.h>
|
||||
|
||||
#endif /* _ASM_EMERGENCY_RESTART_H */
|
@ -1,9 +0,0 @@
|
||||
/*
|
||||
* Pull in the generic implementation for the mutex fastpath.
|
||||
*
|
||||
* TODO: implement optimized primitives instead, or leave the generic
|
||||
* implementation in place, or pick the atomic_xchg() based generic
|
||||
* implementation. (see asm-generic/mutex-xchg.h for details)
|
||||
*/
|
||||
|
||||
#include <asm-generic/mutex-dec.h>
|
@@ -915,6 +915,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}

+#include <asm/tlbflush.h>
#include <asm-generic/pgtable.h>

/* We provide our own get_unmapped_area to cope with VA holes and

@@ -1,6 +0,0 @@
-#ifndef __SPARC_SERIAL_H
-#define __SPARC_SERIAL_H
-
-#define BASE_BAUD ( 1843200 / 16 )
-
-#endif /* __SPARC_SERIAL_H */
|
||||
unsigned long, unsigned long);
|
||||
|
||||
void cpu_panic(void);
|
||||
extern void smp4m_irq_rotate(int cpu);
|
||||
|
||||
/*
|
||||
* General functions that each host system must provide.
|
||||
@ -46,7 +45,6 @@ void sun4m_init_smp(void);
|
||||
void sun4d_init_smp(void);
|
||||
|
||||
void smp_callin(void);
|
||||
void smp_boot_cpus(void);
|
||||
void smp_store_cpu_info(int);
|
||||
|
||||
void smp_resched_interrupt(void);
|
||||
@ -107,9 +105,6 @@ extern int hard_smp_processor_id(void);
|
||||
|
||||
#define raw_smp_processor_id() (current_thread_info()->cpu)
|
||||
|
||||
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
|
||||
#define prof_counter(__cpu) cpu_data(__cpu).counter
|
||||
|
||||
void smp_setup_cpu_possible_map(void);
|
||||
|
||||
#endif /* !(__ASSEMBLY__) */
|
||||
|
@@ -18,8 +18,7 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
do { flush_tlb_pending(); \
save_and_clear_fpu(); \
do { save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
@@ -11,24 +11,40 @@
struct tlb_batch {
struct mm_struct *mm;
unsigned long tlb_nr;
unsigned long active;
unsigned long vaddrs[TLB_BATCH_NR];
};

extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);

/* TLB flush operations. */

extern void flush_tlb_pending(void);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

#define flush_tlb_range(vma,start,end) \
do { (void)(start); flush_tlb_pending(); } while (0)
#define flush_tlb_page(vma,addr) flush_tlb_pending()
#define flush_tlb_mm(mm) flush_tlb_pending()
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

extern void flush_tlb_pending(void);
extern void arch_enter_lazy_mmu_mode(void);
extern void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)

/* Local cpu only. */
extern void __flush_tlb_all(void);

extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP
@@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)

static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)

#define global_flush_tlb_page(mm, vaddr) \
smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */
@@ -44,7 +44,6 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
@@ -1,17 +0,0 @@
#ifndef _SPARC_TYPES_H
#define _SPARC_TYPES_H
/*
* This file is never included by application software unless
* explicitly requested (e.g., via linux/types.h) in which case the
* application is Linux specific so (user-) name space pollution is
* not a major issue. However, for interoperability, libraries still
* need to be careful to avoid a name clashes.
*/

#if defined(__sparc__)

#include <asm-generic/int-ll64.h>

#endif /* defined(__sparc__) */

#endif /* defined(_SPARC_TYPES_H) */
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,19 +1074,52 @@ local_flush_and_out:
put_cpu();
}

struct tlb_pending_info {
unsigned long ctx;
unsigned long nr;
unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
struct tlb_pending_info *t = info;

__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
struct tlb_pending_info info;
int cpu = get_cpu();

info.ctx = ctx;
info.nr = nr;
info.vaddrs = vaddrs;

if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
&info, 1);

__flush_tlb_pending(ctx, nr, vaddrs);

put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long context = CTX_HWBITS(mm->context);
int cpu = get_cpu();

if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
smp_cross_call_masked(&xcall_flush_tlb_pending,
ctx, nr, (unsigned long) vaddrs,
smp_cross_call_masked(&xcall_flush_tlb_page,
context, vaddr, 0,
mm_cpumask(mm));

__flush_tlb_pending(ctx, nr, vaddrs);
__flush_tlb_page(context, vaddr);

put_cpu();
}
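The new smp_flush_tlb_page() and reworked smp_flush_tlb_pending() above share one trick: when the flushing task is the sole user of the mm, the CPU mask is shrunk to the local CPU instead of sending a cross-call. A standalone C model of that shortcut, with the cpumask and IPI layers stubbed out:

#include <stdio.h>

struct mm {
	int mm_users;            /* threads sharing this address space */
	unsigned long cpumask;   /* CPUs that may cache its mappings */
};

static void send_ipi_flush(unsigned long mask, unsigned long vaddr)
{
	printf("xcall flush %#lx to mask %#lx\n", vaddr, mask);
}

static void local_flush(unsigned long vaddr)
{
	printf("local flush %#lx\n", vaddr);
}

static void smp_flush_tlb_page(struct mm *mm, int this_cpu, unsigned long vaddr)
{
	if (mm->mm_users == 1)
		/* No other thread can have the mapping cached: shrink
		 * the mask instead of interrupting every CPU. */
		mm->cpumask = 1UL << this_cpu;
	else
		send_ipi_flush(mm->cpumask & ~(1UL << this_cpu), vaddr);

	local_flush(vaddr);
}

int main(void)
{
	struct mm mm = { .mm_users = 2, .cpumask = 0xf };

	smp_flush_tlb_page(&mm, 0, 0x2000);
	return 0;
}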
@@ -119,11 +119,7 @@ void bit_map_clear(struct bit_map *t, int offset, int len)

void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{

if ((size & 07) != 0)
BUG();
memset(map, 0, size>>3);

bitmap_zero(map, size);
memset(t, 0, sizeof *t);
spin_lock_init(&t->lock);
t->map = map;
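bitmap_zero() is safer than the removed open-coded memset() because bitmap helpers operate on whole longs: clearing only size>>3 bytes leaves the tail of the last word untouched, and word-at-a-time bitmap searches will then read stale bits (the companion srmmu change sizes the allocation with BITS_TO_LONGS() for the same reason). A standalone demonstration; the helper below is a simplified re-implementation, not the kernel's.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void bitmap_zero(unsigned long *map, int bits)
{
	/* Rounds up, so the last partial word is fully cleared. */
	memset(map, 0, BITS_TO_LONGS(bits) * sizeof(long));
}

int main(void)
{
	unsigned long map[2] = { ~0UL, ~0UL };

	/* Old style: on 64-bit, 96>>3 = 12 bytes, half of map[1] survives. */
	memset(map, 0, 96 >> 3);
	printf("after memset:      %#lx\n", map[1]);

	map[0] = map[1] = ~0UL;
	bitmap_zero(map, 96);            /* clears both words completely */
	printf("after bitmap_zero: %#lx\n", map[1]);
	return 0;
}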
@@ -34,7 +34,7 @@
#define IOMMU_RNGE IOMMU_RNGE_256MB
#define IOMMU_START 0xF0000000
#define IOMMU_WINSIZE (256*1024*1024U)
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */

/* srmmu.c */
@@ -280,7 +280,9 @@ static void __init srmmu_nocache_init(void)
SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
srmmu_nocache_bitmap =
__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
@@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
struct mm_struct *mm = tb->mm;

if (tb->tlb_nr) {
flush_tsb_user(tb);
if (!tb->tlb_nr)
goto out;

if (CTX_VALID(tb->mm->context)) {
flush_tsb_user(tb);

if (CTX_VALID(mm->context)) {
if (tb->tlb_nr == 1) {
global_flush_tlb_page(mm, tb->vaddrs[0]);
} else {
#ifdef CONFIG_SMP
smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
&tb->vaddrs[0]);
@@ -37,12 +43,30 @@ void flush_tlb_pending(void)
tb->tlb_nr, &tb->vaddrs[0]);
#endif
}
tb->tlb_nr = 0;
}

tb->tlb_nr = 0;

out:
put_cpu_var(tlb_batch);
}

void arch_enter_lazy_mmu_mode(void)
{
struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
}

static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec)
{
@@ -60,6 +84,12 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
nr = 0;
}

if (!tb->active) {
global_flush_tlb_page(mm, vaddr);
flush_tsb_user_page(mm, vaddr);
return;
}

if (nr == 0)
tb->mm = mm;
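The reworked flush_tlb_pending() and tlb_batch_add_one() above hinge on the new tlb_batch.active flag: outside lazy-MMU mode a page is flushed immediately, inside it pages are queued, and a one-entry queue takes the cheaper single-page path. A user-space model of that control flow; all names here are stand-ins for the kernel's:

#include <stdio.h>

#define TLB_BATCH_NR 8

struct tlb_batch {
	unsigned long tlb_nr;
	unsigned long active;               /* inside lazy-MMU mode? */
	unsigned long vaddrs[TLB_BATCH_NR];
};

static void global_flush_tlb_page(unsigned long vaddr)
{
	printf("flush single page %#lx\n", vaddr);
}

static void flush_tlb_pending(struct tlb_batch *tb)
{
	if (tb->tlb_nr == 1)
		global_flush_tlb_page(tb->vaddrs[0]); /* cheap one-page path */
	else
		for (unsigned long i = 0; i < tb->tlb_nr; i++)
			printf("flush pending %#lx\n", tb->vaddrs[i]);
	tb->tlb_nr = 0;
}

static void batch_add(struct tlb_batch *tb, unsigned long vaddr)
{
	if (!tb->active) {                  /* not batching: flush now */
		global_flush_tlb_page(vaddr);
		return;
	}
	tb->vaddrs[tb->tlb_nr++] = vaddr;
	if (tb->tlb_nr == TLB_BATCH_NR)     /* queue full: drain it */
		flush_tlb_pending(tb);
}

int main(void)
{
	struct tlb_batch tb = { .active = 1 };

	batch_add(&tb, 0x1000);
	batch_add(&tb, 0x2000);
	flush_tlb_pending(&tb);
	return 0;
}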
@@ -7,11 +7,10 @@
#include <linux/preempt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/tsb.h>
#include <asm/tlb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -46,23 +45,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}

static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
unsigned long hash_shift,
unsigned long nentries)
{
unsigned long tag, ent, hash;

v &= ~0x1UL;
hash = tsb_hash(v, hash_shift, nentries);
ent = tsb + (hash * sizeof(struct tsb));
tag = (v >> 22UL);

tsb_flush(ent, tag);
}

static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
unsigned long tsb, unsigned long nentries)
{
unsigned long i;

for (i = 0; i < tb->tlb_nr; i++) {
unsigned long v = tb->vaddrs[i];
unsigned long tag, ent, hash;

v &= ~0x1UL;

hash = tsb_hash(v, hash_shift, nentries);
ent = tsb + (hash * sizeof(struct tsb));
tag = (v >> 22UL);

tsb_flush(ent, tag);
}
for (i = 0; i < tb->tlb_nr; i++)
__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
}

void flush_tsb_user(struct tlb_batch *tb)
@@ -90,6 +93,30 @@ void flush_tsb_user(struct tlb_batch *tb)
spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
{
unsigned long nentries, base, flags;

spin_lock_irqsave(&mm->context.lock, flags);

base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
__flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
}
#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K
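__flush_tsb_one_entry() factors out the hash-and-flush step so the new flush_tsb_user_page() can invalidate a single TSB entry. Below is a simplified standalone model of that computation; tsb_hash() is the usual shift-and-mask and struct tsb is reduced to a tag/pte pair, so this is a sketch, not the kernel layout:

#include <stdio.h>

struct tsb { unsigned long tag, pte; };

static unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift,
			      unsigned long nentries)
{
	return (vaddr >> hash_shift) & (nentries - 1);
}

static void flush_one_entry(struct tsb *tsb, unsigned long v,
			    unsigned long hash_shift, unsigned long nentries)
{
	unsigned long hash, tag;

	v &= ~0x1UL;                /* drop the low marker bit first */
	hash = tsb_hash(v, hash_shift, nentries);
	tag = v >> 22;
	if (tsb[hash].tag == tag)
		tsb[hash].tag = ~0UL;   /* invalidate the matching entry */
}

int main(void)
{
	struct tsb tsb[512] = { { 0, 0 } };
	unsigned long v = 0x12345000;

	tsb[tsb_hash(v, 13, 512)].tag = v >> 22;   /* 13 ~ 8K page shift */
	flush_one_entry(tsb, v, 13, 512);
	printf("tag now %#lx\n", tsb[tsb_hash(v, 13, 512)].tag);
	return 0;
}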
@@ -52,6 +52,33 @@ __flush_tlb_mm: /* 18 insns */
nop
nop

.align 32
.globl __flush_tlb_page
__flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2
wrpr %g2, %pstate
mov SECONDARY_CONTEXT, %o4
ldxa [%o4] ASI_DMMU, %g2
stxa %o0, [%o4] ASI_DMMU
andcc %o1, 1, %g0
andn %o1, 1, %o3
be,pn %icc, 1f
or %o3, 0x10, %o3
stxa %g0, [%o3] ASI_IMMU_DEMAP
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
membar #Sync
stxa %g2, [%o4] ASI_DMMU
sethi %hi(KERNBASE), %o4
flush %o4
retl
wrpr %g7, 0x0, %pstate
nop
nop
nop
nop

.align 32
.globl __flush_tlb_pending
__flush_tlb_pending: /* 26 insns */
@@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */
retl
wrpr %g7, 0x0, %pstate

__cheetah_flush_tlb_page: /* 22 insns */
/* %o0 = context, %o1 = vaddr */
rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
wrpr %g0, 1, %tl
mov PRIMARY_CONTEXT, %o4
ldxa [%o4] ASI_DMMU, %g2
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
or %o0, %o3, %o0 /* Preserve nucleus page size fields */
stxa %o0, [%o4] ASI_DMMU
andcc %o1, 1, %g0
be,pn %icc, 1f
andn %o1, 1, %o3
stxa %g0, [%o3] ASI_IMMU_DEMAP
1: stxa %g0, [%o3] ASI_DMMU_DEMAP
membar #Sync
stxa %g2, [%o4] ASI_DMMU
sethi %hi(KERNBASE), %o4
flush %o4
wrpr %g0, 0, %tl
retl
wrpr %g7, 0x0, %pstate

__cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
@@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
retl
nop

__hypervisor_flush_tlb_page: /* 11 insns */
/* %o0 = context, %o1 = vaddr */
mov %o0, %g2
mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
mov %g2, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
srlx %o0, PAGE_SHIFT, %o0
sllx %o0, PAGE_SHIFT, %o0
ta HV_MMU_UNMAP_ADDR_TRAP
brnz,pn %o0, __hypervisor_tlb_tl0_error
mov HV_MMU_UNMAP_ADDR_TRAP, %o1
retl
nop

__hypervisor_flush_tlb_pending: /* 16 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
@@ -339,6 +405,13 @@ cheetah_patch_cachetlbops:
call tlb_patch_one
mov 19, %o2

sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__cheetah_flush_tlb_page), %o1
or %o1, %lo(__cheetah_flush_tlb_page), %o1
call tlb_patch_one
mov 22, %o2

sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
@@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop

.globl xcall_flush_tlb_pending
xcall_flush_tlb_pending: /* 21 insns */
/* %g5=context, %g1=nr, %g7=vaddrs[] */
sllx %g1, 3, %g1
.globl xcall_flush_tlb_page
xcall_flush_tlb_page: /* 17 insns */
/* %g5=context, %g1=vaddr */
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -408,20 +480,16 @@ xcall_flush_tlb_pending: /* 21 insns */
or %g5, %g4, %g5
mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
1: sub %g1, (1 << 3), %g1
ldx [%g7 + %g1], %g5
andcc %g5, 0x1, %g0
andcc %g1, 0x1, %g0
be,pn %icc, 2f

andn %g5, 0x1, %g5
andn %g1, 0x1, %g5
stxa %g0, [%g5] ASI_IMMU_DEMAP
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
membar #Sync
brnz,pt %g1, 1b
nop
stxa %g2, [%g4] ASI_DMMU
retry
nop
nop

.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range: /* 25 insns */
@@ -656,15 +724,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
membar #Sync
retry

.globl __hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
sllx %g1, 3, %g1
.globl __hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
/* %g5=ctx, %g1=vaddr */
mov %o0, %g2
mov %o1, %g3
mov %o2, %g4
1: sub %g1, (1 << 3), %g1
ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
mov %g1, %o0 /* ARG0: virtual address */
mov %g5, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
srlx %o0, PAGE_SHIFT, %o0
@@ -673,8 +739,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
brnz,a,pn %o0, __hypervisor_tlb_xcall_error
mov %o0, %g5
brnz,pt %g1, 1b
nop
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
@@ -757,6 +821,13 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 10, %o2

sethi %hi(__flush_tlb_page), %o0
or %o0, %lo(__flush_tlb_page), %o0
sethi %hi(__hypervisor_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_flush_tlb_page), %o1
call tlb_patch_one
mov 11, %o2

sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
@@ -788,12 +859,12 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 21, %o2

sethi %hi(xcall_flush_tlb_pending), %o0
or %o0, %lo(xcall_flush_tlb_pending), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
sethi %hi(xcall_flush_tlb_page), %o0
or %o0, %lo(xcall_flush_tlb_page), %o0
sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
call tlb_patch_one
mov 21, %o2
mov 17, %o2

sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
@@ -40,7 +40,15 @@
#include <asm/percpu.h>
#include <arch/spr_def.h>

/* Set and clear kernel interrupt masks. */
/*
* Set and clear kernel interrupt masks.
*
* NOTE: __insn_mtspr() is a compiler builtin marked as a memory
* clobber. We rely on it being equivalent to a compiler barrier in
* this code since arch_local_irq_save() and friends must act as
* compiler barriers. This compiler semantic is baked into enough
* places that the compiler will maintain it going forward.
*/
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
@@ -1549,6 +1549,7 @@ config X86_SMAP
config EFI
bool "EFI runtime service support"
depends on ACPI
select UCS2_STRING
---help---
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#

targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo

KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/piggy.o

$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone

ifeq ($(CONFIG_EFI_STUB), y)
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)

targets += vmlinux.bin.all vmlinux.relocs
targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs

CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
@@ -251,6 +251,51 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}

static efi_status_t setup_efi_vars(struct boot_params *params)
{
struct setup_data *data;
struct efi_var_bootdata *efidata;
u64 store_size, remaining_size, var_size;
efi_status_t status;

if (!sys_table->runtime->query_variable_info)
return EFI_UNSUPPORTED;

data = (struct setup_data *)(unsigned long)params->hdr.setup_data;

while (data && data->next)
data = (struct setup_data *)(unsigned long)data->next;

status = efi_call_phys4(sys_table->runtime->query_variable_info,
EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
&remaining_size, &var_size);

if (status != EFI_SUCCESS)
return status;

status = efi_call_phys3(sys_table->boottime->allocate_pool,
EFI_LOADER_DATA, sizeof(*efidata), &efidata);

if (status != EFI_SUCCESS)
return status;

efidata->data.type = SETUP_EFI_VARS;
efidata->data.len = sizeof(struct efi_var_bootdata) -
sizeof(struct setup_data);
efidata->data.next = 0;
efidata->store_size = store_size;
efidata->remaining_size = remaining_size;
efidata->max_var_size = var_size;

if (data)
data->next = (unsigned long)efidata;
else
params->hdr.setup_data = (unsigned long)efidata;

}

static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@@ -1157,6 +1202,8 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,

setup_graphics(boot_params);

setup_efi_vars(boot_params);

setup_efi_pci(boot_params);

status = efi_call_phys3(sys_table->boottime->allocate_pool,
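setup_efi_vars() above appends its node with the usual setup_data walk-to-tail pattern. The same pattern as a standalone C model over an ordinary linked list, with pointers standing in for the physical addresses the real boot_params list stores:

#include <stdint.h>

struct setup_data {
	uint64_t next;   /* address of next node, 0 terminates */
	uint32_t type;
	uint32_t len;
};

static void setup_data_append(uint64_t *head, struct setup_data *node)
{
	struct setup_data *data = (struct setup_data *)(uintptr_t)*head;

	node->next = 0;
	while (data && data->next)              /* find the tail */
		data = (struct setup_data *)(uintptr_t)data->next;

	if (data)
		data->next = (uintptr_t)node;   /* append at the tail */
	else
		*head = (uintptr_t)node;        /* first entry */
}

int main(void)
{
	uint64_t head = 0;
	struct setup_data a = { 0, 0, 0 }, b = { 0, 0, 0 };

	setup_data_append(&head, &a);
	setup_data_append(&head, &b);
	return (head == (uintptr_t)&a && a.next == (uintptr_t)&b) ? 0 : 1;
}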
@@ -102,6 +102,13 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);

struct efi_var_bootdata {
struct setup_data data;
u64 store_size;
u64 remaining_size;
u64 max_var_size;
};

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
@@ -703,7 +703,10 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);
static inline void arch_flush_lazy_mmu_mode(void)
{
PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
@@ -91,6 +91,7 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
};

struct pv_time_ops {
@@ -679,6 +680,7 @@ void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
*/
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
return regs->orig_ax & __SYSCALL_MASK;
return regs->orig_ax;
}

static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
regs->ax = regs->orig_ax & __SYSCALL_MASK;
regs->ax = regs->orig_ax;
}

static inline long syscall_get_error(struct task_struct *task,
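Dropping __SYSCALL_MASK from syscall_get_nr() matters because an x32 system call number carries a marker bit that consumers such as seccomp need to see; masking it off made x32 calls indistinguishable from their x86-64 counterparts. A small demonstration, with stand-in values for the marker bit and mask:

#include <stdio.h>

#define X32_SYSCALL_BIT 0x40000000U     /* stand-in for __X32_SYSCALL_BIT */
#define SYSCALL_MASK (~X32_SYSCALL_BIT) /* stand-in for __SYSCALL_MASK */

int main(void)
{
	unsigned int orig_ax = X32_SYSCALL_BIT | 39; /* hypothetical x32 call */

	/* Masked, the x32 marker is lost and this looks like call 39. */
	printf("masked: %u\n", orig_ax & SYSCALL_MASK);
	/* Raw, the caller can still tell it was an x32 entry. */
	printf("raw:    %#x\n", orig_ax);
	return 0;
}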
@@ -7,7 +7,7 @@

#define tlb_flush(tlb) \
{ \
if (tlb->fullmm == 0) \
if (!tlb->fullmm && !tlb->need_flush_all) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
@@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI_VARS 4

/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -35,13 +35,6 @@ static bool __init ms_hyperv_platform(void)
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return false;

/*
* Xen emulates Hyper-V to support enlightened Windows.
* Check to see first if we are on a Xen Hypervisor.
*/
if (xen_cpuid_base())
return false;

cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);

@@ -82,12 +75,6 @@ static void __init ms_hyperv_init_platform(void)

if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Setup the IDT for hypervisor callback.
*/
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
#endif
}

const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -103,6 +90,11 @@ static irq_handler_t vmbus_isr;

void hv_register_vmbus_handler(int irq, irq_handler_t handler)
{
/*
* Setup the IDT for hypervisor callback.
*/
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);

vmbus_irq = irq;
vmbus_isr = handler;
}
@@ -153,8 +153,14 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
EVENT_EXTRA_END
};

@@ -2097,7 +2103,10 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
if (boot_cpu_data.x86_model == 45)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2123,7 +2132,10 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
x86_pmu.extra_regs = intel_snb_extra_regs;
if (boot_cpu_data.x86_model == 62)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;

memset(&regs, 0, sizeof(regs));

ds->bts_index = ds->bts_buffer_base;

perf_sample_data_init(&data, 0, event->hw.last_period);
regs.ip = 0;

/*
* Prepare a generic sample, i.e. fill in the invariant fields.
@@ -45,9 +45,6 @@ static int __cpuinit x86_vendor(void)
u32 eax = 0x00000000;
u32 ebx, ecx = 0, edx;

if (!have_cpuid_p())
return X86_VENDOR_UNKNOWN;

native_cpuid(&eax, &ebx, &ecx, &edx);

if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -59,18 +56,45 @@ static int __cpuinit x86_vendor(void)
return X86_VENDOR_UNKNOWN;
}

static int __cpuinit x86_family(void)
{
u32 eax = 0x00000001;
u32 ebx, ecx = 0, edx;
int x86;

native_cpuid(&eax, &ebx, &ecx, &edx);

x86 = (eax >> 8) & 0xf;
if (x86 == 15)
x86 += (eax >> 20) & 0xff;

return x86;
}

void __init load_ucode_bsp(void)
{
int vendor = x86_vendor();
int vendor, x86;

if (vendor == X86_VENDOR_INTEL)
if (!have_cpuid_p())
return;

vendor = x86_vendor();
x86 = x86_family();

if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_bsp();
}

void __cpuinit load_ucode_ap(void)
{
int vendor = x86_vendor();
int vendor, x86;

if (vendor == X86_VENDOR_INTEL)
if (!have_cpuid_p())
return;

vendor = x86_vendor();
x86 = x86_family();

if (vendor == X86_VENDOR_INTEL && x86 >= 6)
load_ucode_intel_ap();
}
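x86_family() above implements the standard CPUID family decode: the family field comes from EAX[11:8], and family 15 adds the extended-family field from EAX[27:20]. The same computation as a pure function, exercised with made-up sample EAX values:

#include <stdio.h>

static int x86_family(unsigned int eax)
{
	int x86 = (eax >> 8) & 0xf;      /* base family, EAX[11:8] */

	if (x86 == 15)
		x86 += (eax >> 20) & 0xff;   /* extended family, EAX[27:20] */
	return x86;
}

int main(void)
{
	printf("%d\n", x86_family(0x000306a9)); /* 6: family-6 style ID */
	printf("%d\n", x86_family(0x00100f42)); /* 16: 15 + extended 1 */
	return 0;
}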
@@ -263,6 +263,18 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
preempt_disable();

if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}

preempt_enable();
}

void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -292,18 +304,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}

void arch_flush_lazy_mmu_mode(void)
{
preempt_disable();

if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}

preempt_enable();
}

struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,6 +475,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
.flush = paravirt_nop,
},

.set_fixmap = native_set_fixmap,
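paravirt_flush_lazy_mmu() above is deliberately "leave, then re-enter": it forces any queued MMU updates out without ending the batching region. A tiny standalone model of that idea:

#include <stdio.h>

static int lazy, queued;

static void enter_lazy(void) { lazy = 1; }

static void leave_lazy(void)
{
	if (queued)
		printf("issuing %d queued updates\n", queued);
	queued = 0;
	lazy = 0;
}

static void flush_lazy(void)
{
	if (lazy) {          /* only meaningful inside a lazy region */
		leave_lazy();    /* drains the queue... */
		enter_lazy();    /* ...but stays in lazy mode */
	}
}

int main(void)
{
	enter_lazy();
	queued = 3;
	flush_lazy();
	printf("still lazy: %d\n", lazy);
	leave_lazy();
	return 0;
}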
@@ -507,11 +507,14 @@ static void __init memblock_x86_reserve_range_setup_data(void)
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
* On 64bit, old kexec-tools need to under 896MiB.
*/
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX MAXMEM
# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
#endif

static void __init reserve_crashkernel_low(void)
@@ -521,19 +524,35 @@ static void __init reserve_crashkernel_low(void)
unsigned long long low_base = 0, low_size = 0;
unsigned long total_low_mem;
unsigned long long base;
bool auto_set = false;
int ret;

total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
/* crashkernel=Y,low */
ret = parse_crashkernel_low(boot_command_line, total_low_mem,
&low_size, &base);
if (ret != 0 || low_size <= 0)
return;
if (ret != 0) {
/*
* two parts from lib/swiotlb.c:
* swiotlb size: user specified with swiotlb= or default.
* swiotlb overflow buffer: now is hardcoded to 32k.
* We round it to 8M for other buffers that
* may need to stay low too.
*/
low_size = swiotlb_size_or_default() + (8UL<<20);
auto_set = true;
} else {
/* passed with crashkernel=0,low ? */
if (!low_size)
return;
}

low_base = memblock_find_in_range(low_size, (1ULL<<32),
low_size, alignment);

if (!low_base) {
pr_info("crashkernel low reservation failed - No suitable area found.\n");
if (!auto_set)
pr_info("crashkernel low reservation failed - No suitable area found.\n");

return;
}
@@ -554,14 +573,22 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
bool high = false;
int ret;

total_mem = memblock_phys_mem_size();

/* crashkernel=XM */
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
if (ret != 0 || crash_size <= 0)
return;
if (ret != 0 || crash_size <= 0) {
/* crashkernel=X,high */
ret = parse_crashkernel_high(boot_command_line, total_mem,
&crash_size, &crash_base);
if (ret != 0 || crash_size <= 0)
return;
high = true;
}

/* 0 means: find the address automatically */
if (crash_base <= 0) {
@@ -569,7 +596,9 @@ static void __init reserve_crashkernel(void)
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
high ? CRASH_KERNEL_ADDR_HIGH_MAX :
CRASH_KERNEL_ADDR_LOW_MAX,
crash_size, alignment);

if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
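After this change reserve_crashkernel() tries plain crashkernel=X first and only falls back to crashkernel=X,high, which is what later permits the reservation to land above 4G and triggers the automatic low reservation. The resulting control flow as a standalone sketch with the parsers stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for parse_crashkernel() and parse_crashkernel_high(). */
static int parse_plain(const char *cl, long *size)
{
	(void)cl; (void)size;
	return -1;                 /* pretend crashkernel=X was absent */
}

static int parse_high(const char *cl, long *size)
{
	(void)cl;
	*size = 256;               /* pretend crashkernel=256M,high */
	return 0;
}

static void reserve_crashkernel(const char *cmdline)
{
	long size = 0;
	bool high = false;

	if (parse_plain(cmdline, &size) != 0 || size <= 0) {
		if (parse_high(cmdline, &size) != 0 || size <= 0)
			return;            /* nothing requested at all */
		high = true;           /* reservation may land above 4G */
	}
	printf("reserve %ldM %s 4G\n", size, high ? "above" : "below");
}

int main(void)
{
	reserve_crashkernel("crashkernel=256M,high");
	return 0;
}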
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
addr);
addr, sizeof(u8));
}

void kvm_lapic_init(void)
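The extra sizeof(u8) argument gives the gfn-to-hva cache the length of the datum, so the setup can reject guest addresses whose data would run past the cached mapping. A simplified sketch of such a bounds check; the real kvm_gfn_to_hva_cache_init() does considerably more:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Reject caches whose [gpa, gpa+len) would cross a page boundary;
 * a stand-in for the validation the length argument enables. */
static int cache_init(unsigned long gpa, unsigned long len)
{
	if ((gpa % PAGE_SIZE) + len > PAGE_SIZE)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", cache_init(0x1fff, sizeof(char))); /* fits: 0 */
	printf("%d\n", cache_init(0x1fff, 8));            /* crosses: -1 */
	return 0;
}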
Some files were not shown because too many files have changed in this diff.